diff --git a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png index fec85cbec9ed6525fe6bfa57920489845b2e8467..e31a02acb4618c78669e3e979c0a0ff9b3ee6817 100644 --- a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png +++ b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed73eaa4621ea4f62bbc2019f61e0558b9fc8eecbd41fa4c9bfb17886b7525a1 -size 693670 +oid sha256:5ea5dc7e3b9a8d10da784db15466494aa8b78712e304c1d95f206230264e0705 +size 666376 diff --git a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png index fe3967a7fdcf07ffdbd9150c74b6494fb1a96630..be413a2589621995680fdb6205fcd920f915ca60 100644 --- a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png +++ b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28e5a02973fe8fecd54c5c90b8532f3a5a0f73b27d94d96564c30113c3b79a5a -size 689394 +oid sha256:71820ff86d03d69943c68394da9d67d8f60e8acffdaba1a37854e807e5184358 +size 428718 diff --git a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png index 988bd64bfcc063f328dbf4fa2061c6683c31e07b..05d99fb5066db56b1cbe25772db2629acd7c24cd 100644 --- a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png +++ b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a298a24b7a887744dc23119bd04c0256997781b73a2821038477e4540cb636a0 -size 695959 +oid sha256:82639c331231f2d0dbe8044e3bfe9a159e849af7707733b1bd0befded60d3b76 +size 657641 diff --git a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png index fd6f11a7548e305fdcb598251d45fd9cd2758297..55b8e28f78979c31908ae823dbda9062ad1fda7e 100644 --- a/images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png +++ b/images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dac34ad62b136659137d875fce28c512410bf7f6140c6c40aad027c4e0eb1c3 -size 703468 +oid sha256:f85c51a9a0e6016a4de3fdae2bec30e085a7bdd9274497146405986c72a95447 +size 664965 diff --git a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png index 1efe25d6106ef13f744ff23511b36f059bd69c29..6a3032ca242a7c461045fa62440ce89c9004dfcc 100644 --- a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png +++ b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:587a6ba10c80469072ae166c619beb7d534fe7ae8d894b52eed17e3357aabdb7 -size 1588625 +oid sha256:dec9fe7085a4e59d04cd707758e722acb0af762458f1dee9f01b3f553a3e439e +size 903731 diff --git 
a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png index 71a256eaa2045b5e6cba6d9001da655b39c88f1b..7beecf48e61a99555fda4e8b733472fcf7fa6b19 100644 --- a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png +++ b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad88604f99113646557592e9491fd8cab377af2b35f52cfea0f8fc188dc2edd2 -size 994190 +oid sha256:34c5dba0ebad9bf8f4fcaa502c440ef40166416e8fd7ca721836b879645472d3 +size 453902 diff --git a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png index 90761f0937ad5a1a392e5c7efb924af71f864dc9..e5fd2e3a7e68d6a85889f4d7be9718243642a2ca 100644 --- a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png +++ b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8aa91a38808614de7975f5223a738044aab594fb87e6324ac34d09de78b42050 -size 1082479 +oid sha256:edbe608fc59f98abf787d25c55c36f0708fd125552e6c6262ce6531cc65a1454 +size 491452 diff --git a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png index a844cbdbfe8a2507c8dcb3f9fed8ff0d4df53177..adb3c4db637d6218a23e8c3d3ebd8cabb0d83ee5 100644 --- a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png +++ b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:159c68ec2846cdd0f49d3230eb6be92088da428366f6680cc50554c1c2741b7f -size 331058 +oid sha256:037fce890b82dd679cb83863caf66063eed6eb73e4ab7752b8ef8a0d9adfa695 +size 308192 diff --git a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png index b69787de403558fc71b1065c57eff5e8089cdec2..ee460672e9e6833ea05bee4178f914a62f722dbf 100644 --- a/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png +++ b/images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18feda0b8a8060cbbca978b68ca3ddd2d5701e43392979c8f741b2c0103b0493 -size 855463 +oid sha256:61c6134c7a5ca04ceee51e13ba7259cddf6bcb3abc7c13f3047f33b1d7cad4b4 +size 716237 diff --git a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png index 5c967bd107af3a5e3fe930e1a8ba6320f466f0d6..169ce0087b51f2edc0455db4adc441516412a6cf 100644 --- a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png +++ b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:128db0903e4ab1879fc07ce6f09265aa7e3f0fcf95cfc782c689f2f246ad1d4b -size 1378502 +oid sha256:cd4b2f4f50083f1e70c68c61ed1462ceaead02da21df7c1b34a8eaa6ae883237 +size 1635716 diff --git 
a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png index 19b56ab7de01edb8ac2c6d154df9cd9d9f9b97c8..5f2de2593002bc4982e013c39b0e9f125c213594 100644 --- a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png +++ b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b503cc1005023393cc5cbc4c94080112b0fdc5cc6ff6c7899269a87203e32682 -size 342957 +oid sha256:344339a66cef9990f99574a29dacd2bbd7545355f06a42cc49e5349ee0497c21 +size 467406 diff --git a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png index ec327b61baefcedd8adc5872a32f93a743c68195..9ba55a50be3dba4ddd486679e7b16474568e73df 100644 --- a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png +++ b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adfc4b9b1f380aa272a07701454f5f142e976c7ef6a4e534c851376a507864ff -size 1378855 +oid sha256:89aa26a9aee768c4b7803d7802d7c8c06a9bb65b5bf535bbae8782be0240361e +size 1247553 diff --git a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png index 68c89dbd6127c7c90263435382160ec368303337..de88b4fcc51154e124bbbee73a96fb21e0a154e3 100644 --- a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png +++ b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:223e4d9a28b751c493d31695b069bd24e0929dbf5d951d40e4c398e6671ee5ab -size 3626483 +oid sha256:dfa144181a747afc4d277884ec00bec60c0b82281cefb6ed3e5475b2042761e8 +size 931569 diff --git a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png index 8839e62c0a5fa8a6fe005b5330527ccc99c68fde..38f42ffa3e281d463c3873f363fd296111702839 100644 --- a/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png +++ b/images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae02be12071cd45642d32b0b9ac5f7523c9fff067a6a2ba529bc5afe25118e3b -size 1385801 +oid sha256:8ffe74d7ff4cc310342c1e705d82ca18dfed51d14e4f725550a8b97e26648ef2 +size 1016678 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png index 6431079d46ffbf04237ecf32f43723e81305ea2d..a4cdee8ca3687049376c9a8c1a371dac633d4414 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f60930d9d6dbd5019913e404d5098615589cdca560f69aab67440fef4fe2db0 -size 1254830 +oid sha256:252482159e08d1f827e0494ad187563ffd6701b421c208cef584a4f3bfe4b794 +size 907321 diff --git 
a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png index c60de768712e9bd3d7cd78dcf9dd840322e8b796..c9d7997ec633346effc6296d9cd6fdd399cc1267 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34735442816baff4a17ff7ccb9e920000978cae17d7cbac59b11d79e5f56c850 -size 830488 +oid sha256:9bd6d20dd6bd248230b8678f7e007a1fdb5c6ac35d97f534a63805685ef957a9 +size 579724 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png index 5af624795f17267e8fd0cc332bdf8103bb0fd184..a85ddd41f13dee1e50d41a976033ab2a5975348c 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8734d53731d11103dfe1cc25b34a3b0e4e4eb1c40fe20805bd27d8bdbfd8f08 -size 921329 +oid sha256:224d504c31251a4cc60fc8fb9a99fae509f88be327dfafd48a007a3fb3344fd5 +size 565563 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png index 2556186d5843621724ed7eb0ad72bf7e59ca0fb7..318eb889edd2dceaa93104ce155ae40aa9196458 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:089ec3055ab6acaeec3877a1aeffe3abf813d4214cf31d9025a5c02640465f25 -size 844340 +oid sha256:63d5865ceda804f54d8aa5532e830889c3b895dd92129164f96f3c78c626dd33 +size 503962 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png index 726540a904be971f18a79fe6b78e4a4931724600..f26022b17ce424d00ca1e16b07dc64e8c559e6d0 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9a881c148a272be627b9e8e12432afcee2b85196ee5122bb746f6ebbe76f2a1 -size 925105 +oid sha256:f7a02b08a2d4b9b2b9f79c1a3abdaf7aade2bbdbd4f219ca305c871a3942d92d +size 442932 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png index afdf27610aae201ad130bd9085c600735aa558ac..f95a688f217689be92baab82ad547edc8716e031 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:914d1e5af062cfd7afb1742ad93201815f809ce60a95c1a317d4083d0c97cf7c -size 931795 +oid sha256:f02b06144ca114f8a7d1d54c70061ac9c0d14c3b8de65e452804c785a4d686d7 +size 794396 diff --git 
a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png index 66b4fb4d2790648d84ac58215a23c828d90898e2..b96c86e2826456d5254982f4eb24844166e0901d 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b057d402243fec43d11a2cf93d1934f4638fa2c7f7c979e55c652f14b2b80ea3 -size 777364 +oid sha256:3051d074b5482e4b5a5c078923b930793392d39d5bf075353edeaf4e1010a107 +size 770900 diff --git a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png index bf043f2a694d24333310cf4b841d2a26e3adaa92..fcbc19ca9f590b83f89c0f8aabf70b09807d2f70 100644 --- a/images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png +++ b/images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca35005bd5956b751b1189c17ae277e0c308af8ac21ccbec915221ff2a21a75c -size 958165 +oid sha256:e5ce3b0d3ecf09e81ed46a6e3183c0f030ee96d969c2d539938a0c3d0d44a4e4 +size 673149 diff --git a/images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png b/images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png index 3d44b0c878b740ecd37437e604fa860a18befc7d..234ec839b049c87994bbd7eae0d2aa1aaf3560d5 100644 --- a/images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png +++ b/images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f95e2b5781fc37438ba09e01d71d52a37c7884e002be5a8565591c87579123e -size 1817308 +oid sha256:0d1548f74fbd1bc55c2041d99c3137ed3c0a3e521f1c005be5ebf354bb21901c +size 1457962 diff --git a/images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png b/images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png index 9ab28755858a6db45238b0c407e3ebb138e5170c..6a2c066848d88a059d0d61a0b2757b8231d115c7 100644 --- a/images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png +++ b/images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:961d869a0b82c9d80ca8137b234f663e521e9dcc3b90f45206fe7bbe97295445 -size 1316986 +oid sha256:d6428dcfb53948a5941544cded120a891093af442900e983765fe74308b4eee5 +size 970170 diff --git a/images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png b/images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png index 0e266acfd8707df2700dae6bdba76db8ec8c1dcc..ea06853891a45e2b213a7163313c2c930d277da0 100644 --- a/images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png +++ b/images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e38e6cff940a5f069839c31ceb20e8d5f1a37f175e9d63220174f3761f207e90 -size 1991280 +oid sha256:9a1c4569cc499951db2bde639932f3d5e796334d075453788772d023301b1f4a +size 1506871 diff --git 
a/images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png b/images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png index 0a1a1bfcf3beeca85b977d2d8427bda5a307a80f..f55636c8b2c55f707a9358f729c4fd58e81ed3eb 100644 --- a/images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png +++ b/images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03724bda3e6ce769fe1f3e9c4d81b11d4b0bd50627e9acdbfee3d0979daf692a -size 1073729 +oid sha256:fd15cb8e387f63cdcc2ca9dc4770868bf2cab4b374f8552fe278ae77fa1589e4 +size 1313414 diff --git a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png index 0c9d1a0c3708e5986f652e11e015257c90d92380..72582d40e7183fed66f5390ccdb46605b65d76a3 100644 --- a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png +++ b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad40bc098cb697a6098aee5b8778593a05b890a774ebf6d3cf32c0d70474db19 -size 1860814 +oid sha256:3a4cf138c5a52d47eab87d4200c8248b65b866ed3ed51dd427ef252ed6f4ee41 +size 1759567 diff --git a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png index f149b70425a6302e0683d5d072a89034d07bab8f..4a23df88f7eef0f594de389bda4dbbb9afc6b55d 100644 --- a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png +++ b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab7c522bd766c2c19eddeeb979e404705a092da3e864567d4793e82d191b7814 -size 1196260 +oid sha256:1b24e955b5cbcff3f37b7e706dc2c578099ac000fe3544432bdfc10db241cf72 +size 692049 diff --git a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png index 08c304614205c602649be4d710a47c24903ec97d..bcb6f244afd027007772b3b4a31fc57c9914511e 100644 --- a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png +++ b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4852ff492337c5400be2aecd8e0c96bff1974c40f76cc48af54fc24ed5d41d1 -size 1784678 +oid sha256:157cf497990f27277976a95336607f543e5fc7ba7b3d15e7f82ec883dea2cf7b +size 1583613 diff --git a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png index 2b841c2839e4ca9b3c000140de2f9510d5924ffc..066eb51de8161d0a63a8e6f60b4e6fb617738d82 100644 --- a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png +++ b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed3cc22531212ee2b87dc8c340b2c45f1195c6c96c70090a448c5c531937dc2d -size 1678348 +oid sha256:8f5acef5b25eeec0b70a4242ecfdd98921b90d16a20dbc91e8d2c256cfb78693 +size 1751051 diff --git 
a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png index 9bef35106272d5d18f88b134cf9a40f916c62fd5..36857ec00e592b3d9253075032269dadce879379 100644 --- a/images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png +++ b/images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3286a0e98a459c5616759df811ff0d0d42e904b502d3dc3a54cb46acc9f36ad -size 1195689 +oid sha256:d15316395b532eccdbf09fcccd93d5ef916431cbe0df73781cabf685f6c73af6 +size 1124775 diff --git a/images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png b/images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png index 59304a5431bc911b4a52ec6134b1822dcfb68468..79a0641f08e749a1206c1339321953109fdfad83 100644 --- a/images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png +++ b/images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdd8d92dad534b41a03f9f451c8ea4269569930d1fc65c24101bd179a28fd103 -size 558510 +oid sha256:41c58d7166b4f54e69aed47c49542b050b43d2d292172b6e9022fc6875ee8462 +size 782209 diff --git a/images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png b/images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png index 6e8720b47546e549763dbc1007b7cf57ef8c5723..ea3c4c80a2116db9108dea7f100a8d3a65af590f 100644 --- a/images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png +++ b/images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1430cbb2f88e8ad89cb90223305044e4b2f91f18b077e20925520852fab36d40 -size 556569 +oid sha256:ed3b6eb7df21ea9f04bed41138d402c1ae0fd1426d5ccd9de2198dc60ac7e255 +size 576674 diff --git a/images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png b/images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png index 4f2e3d4b114b53d09b348b7e3bd07d3168ecb604..a53910635695ced13da778c940367747e261f957 100644 --- a/images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png +++ b/images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a9e33971187a993675951cad501142455272b61a8aa7c85cd94f6cf06477e08 -size 604869 +oid sha256:57038b70b7c55c414f749517e1d5d783ec4d91adf2e5abd5d447d4790e55ee91 +size 782387 diff --git a/images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png b/images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png index fbdfee5ac037c6679dc6e9b7fe01dd1a2aa726fc..44e2c0024818b5b48a0dcf922f50307a4b13b2d0 100644 --- a/images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png +++ b/images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e07948de02a93a456c239fd32e3533959da69c914eeaa0bf44a7bdf056a121a -size 1009412 +oid sha256:d42168b77c4341c7e475024164a16000a1246f134e6ff0068bdb7daa74999b16 +size 898105 diff --git 
a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png index 929e207216082557083ba612c2715125eac2ed80..53c7b391381e60e0aa92485d56dbc85e80faf40b 100644 --- a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png +++ b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:850d2bb29bcebc6941d82ef54c72e1cba44734b2edb72682bbfd60ca88166e2b -size 1148439 +oid sha256:4af0f10e9a557c5b7d51cb0368ebf0aa81070bea5ea08e73435a756eb3f03d75 +size 1165636 diff --git a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png index e7375ae4dab7132fb0274d4fde6a98b5e86c63ec..fc76af8d96f26ffe698166acb8d267fc77fa2c66 100644 --- a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png +++ b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd297d5a47596d4209047d0e4dd63872ac11ea7bb596f685b7297a7b3a889695 -size 1509228 +oid sha256:5373b834cb5be00c50891b1c6b359e14aadf68574bb883fe463109237b5af872 +size 1517005 diff --git a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png index b2f5bd77bf2c0b1ede1d50acfb8bf94cd10c53fa..5ff5aa0425f10dd58c3ff7844db23b2ea8256d12 100644 --- a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png +++ b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c598ebeb4d4963ef8f12846ca67328f8b3fd44f6e640e2bd6c8f17ba50ac16e -size 1508990 +oid sha256:b26936c1620a66f2b249d89adebf09b266237f5fe233874866c7ffccd8009ae2 +size 971591 diff --git a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png index 1aab11d89931387f14c3426a42049662434a1e6f..957985da61ac6cc3d0ee2046de8106ea7131a67a 100644 --- a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png +++ b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df0d35b6a7038f0992b55657029ae38bc933ef3d6b7daa42396c605ac7b5b100 -size 1491307 +oid sha256:a18f84f9226dbf9b6fd8bbb4b2b28c85977fe648af2988c402e2dffae547a2b4 +size 1011099 diff --git a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png index a563cad0d6205f1ba04b724b8e953e6dc045428d..c1195fd20f8e6701e5eeb1ac8cf937be96e737ce 100644 --- a/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png +++ b/images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3cc5d7d883bc875db114c145ea7bd52d2a01e9dc22d023bce1ecb7d77bbea6e -size 1450268 +oid sha256:641d954e1a5cee97c88fe52ad60b5630649ea0065f7003f55f737b08f8f8ac29 +size 1156596 diff --git 
a/images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png b/images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png index e629c127c38baf758d0fbfb3d12c3026f3bebd95..c63931adaa1cc40b934fa32ae01411bd70b11aca 100644 --- a/images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png +++ b/images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:050ca402724bd386c686e64740bd344667ef802e68ef09dcf74e63aad16e3c68 -size 1401938 +oid sha256:c34ca90a0a1da92cf824cdc5d6de899737877642dd24b7ec0d766a75d0001127 +size 1424978 diff --git a/images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png b/images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png index 4cd7c128299d8666e3c4ce899379b07ea1f891f7..cfd248d0dd12a6da82d33bbd14594c4523a931ec 100644 --- a/images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png +++ b/images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06ae0b3b402e3d5611c4324135e54652b3ba769720738fede90a36f975d803ca -size 1466313 +oid sha256:56bf9884c9fd2c3a73ad0b7fbc488383719201e7e7b7cf8bebd315f19e015797 +size 1222104 diff --git a/images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png b/images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png index cfc4c4b0e90fd675281c8909fddfc83ac3392b10..bb123fa37e0c0839f7096c01b1e3eb6e62a78c36 100644 --- a/images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png +++ b/images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:979d779b4d12982a3bb7a382d275940a4c57f2c7151cf6fa417595dc23815864 -size 919628 +oid sha256:fd7174766dbfbb444b1229dbb671ebf6e1d38a82abe5328b763b5020b7f5f783 +size 1083331 diff --git a/images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png b/images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png index da2a753e4f4a99a148712c1ca83ce20fe93319d1..4514ae1db2a03d0bf73e2138d4fd8d84edf3ba54 100644 --- a/images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png +++ b/images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f74c86cf9ad184f2edf956dc98e8fb4fe1800146eb5b8d6ee70e0917f87b11d -size 889546 +oid sha256:5bd361e410dbbba8c944c1ea856d84af175cbb8dfa66c6a9b56ac395f6629080 +size 1289081 diff --git a/images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png b/images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png index 7ae0e1d8809d150111c1b8633ab075b578397479..3492aa5e8a16c5f3a9b2c45ab20f0d7738f7f298 100644 --- a/images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png +++ b/images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95881d08a05acec22e27cccd3958a76c2624c708b19def40193e73afff3e738e -size 916360 +oid sha256:bb829fe69b2e1a36676ed891c262714af9fafbf1107375d47372a556792058d9 +size 845127 diff --git 
a/images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png b/images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png index 19bd0d65957912ce665a101f74e096bc0fac624f..5b3ed0fe88510d941372a1319696f65e306058cc 100644 --- a/images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png +++ b/images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68ecc50ee0ca66d6815d3d3792b45ecba1b2a65f1868d220490c33fcbf6d96bb -size 579794 +oid sha256:ceaaa09b20b917ab9e845490a9000cb9467668a40d5b0250be87e398838264ee +size 525589 diff --git a/images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png b/images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png index e9ad9c7a903f9fc9cdf173a02e3d98e0d20bb977..c109919324d2398f933d6ce39c24dac67c99b292 100644 --- a/images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png +++ b/images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:384b1859b77930272d23ca3e8a87aaa304e217cfb34329741b57d3d34777f9c5 -size 1881397 +oid sha256:ec7c2ad7cae8bdf509573c5e339c98df71aea24a1a492c7a021418dafe9dbe90 +size 1009280 diff --git a/images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png b/images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png index 94301ab2f81258f28f5adcad225b5ed6e0133604..a8256a7fd42432502db8970ede7e3386a1d088cf 100644 --- a/images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png +++ b/images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:488ebdc812216446812ddb8a9c0614c071b3c04078478492bbf9a0cfc0275c55 -size 785310 +oid sha256:ea36357c2157f9788546946900d65eb30377ec7609f26329ab42e7367abdb911 +size 731451 diff --git a/images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png b/images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png index c8f60d8a8fb65bda64a3ed703e498dfa42d790a4..2f11b3ba2dc87171e2630c04070607ce29ee8f84 100644 --- a/images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png +++ b/images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b64ee4d1344c1ad16ecd7ec480776fb617d6520e12f50ce9d9a0c6709dd70e1 -size 887651 +oid sha256:a0fa27d80ee610e7b5930585b7a5448a6c923defe7f4a0b578d6392298744c2a +size 888215 diff --git a/images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png b/images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png index fa78b1a3494a44ecec418e8492db02ee4d055b8d..00303243dc08b830497e02d06fb76028075338bb 100644 --- a/images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png +++ b/images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d31c2978c5c11a22a6393912ca346201cd8ab45584259bfd44f55807c2b0007 -size 817809 +oid sha256:1f997db3183b40b681ac52fe89973d3c8f716de1e0d58620b10f6af1b3f1411b +size 650309 diff --git 
a/images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png b/images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png index 3abe84a6c6f4f3855d88c2e319ec5d4e37536886..a489992c2613ca827430399efae298ddfc1c83d0 100644 --- a/images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png +++ b/images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba78a63fa198a4f1c5471b8be8bbac3c78ebabe1ece8700d722df4c7126b6c62 -size 778485 +oid sha256:c30bea3493b7f71cc4d6bb434def7177e963ef6b2d47d5fef7df3cf64d7eed13 +size 864913 diff --git a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png index c76e147f52c4031d76d25e33274ef0ab844795d0..be563fbf4827f17f02091e4363ce993400ff56d8 100644 --- a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png +++ b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d94df3941d240577fd4dd08f1531f4a36c5409683b7cbfc8994dd9e493fcd32b -size 1478798 +oid sha256:2addf0b34c3533a4de035f165dda58847d4d86820c589b8dcf091257d37cbef7 +size 1064282 diff --git a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png index 103459fb21de853cbca6d2bac6fbce979b6aeb89..a4e3a526da229a4e055acbe2f78da9aa764a2aa5 100644 --- a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png +++ b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:000f15efdce2a9300656b37ba5ae4e1d30904e6ac75631442bc7af263b9f7912 -size 874460 +oid sha256:2a2ffe7a395603624ca26039b75a01b08067599e43660572975b390be99e86dd +size 1193194 diff --git a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png index 9af2b1c7a03436e171d9ee3cff250279fca3b34a..2f676bde7dd13d41ce1cb68c9a48d72c33fad85a 100644 --- a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png +++ b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f00a738dae476cbcf91e3940f9c347b57527cd14e2238f6c248d314d691da7d1 -size 619700 +oid sha256:c4b41564a34827b5080a17ba751812d9420804cd21f07e4af6f52ae398786735 +size 1121355 diff --git a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png index 7cd9eb2f76c903a2d7cfa8552101422a9a7fe934..384a0ffbdc074d591300646974e2bce0e29608b0 100644 --- a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png +++ b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4a743509d0efa4cafb9984e9a675366089f53248cd8113cdab22b120ba4a7c4 -size 732242 +oid sha256:fdf01a4d76907060670268d4bb12f78b7eee484a8424e8f8c5515c88ef213036 +size 685269 diff --git 
a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png index 7cd9eb2f76c903a2d7cfa8552101422a9a7fe934..de4b2bdb34089a680b0f145d7279340e4c6a9bd2 100644 --- a/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png +++ b/images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4a743509d0efa4cafb9984e9a675366089f53248cd8113cdab22b120ba4a7c4 -size 732242 +oid sha256:644ef83185d864c258fa33ff2481b8978233648b6be23f8ca14892a2fba513d7 +size 1181879 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png index 6efc6e86143a068b7f51d39bc049aebfb5d277d0..1fce9955da7612e15724f3885430d6653714780a 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa24bf8e4f97741ac592696acf3426f66af64047f306047ab34082e899fe56c5 -size 475235 +oid sha256:47f2ed3506f3fdbdc6048b86ca92f4761d6647c07c9a33a8c857d8dcdca0c45a +size 860041 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png index db51d5778da763ca77134bd71953f6abc08f904a..498022a79dabaacc8510c102cba15f0d88ce8e33 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b250af00f7f196a6bbb20ea18d99cbcff032670bdebed11c2d8299a43d8fc0d8 -size 882896 +oid sha256:39d6ac6775affd9e974523f627242c3aab142b03f38fadb7b1fd881122bb1aba +size 884348 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png index 8cd46812e0e2b094cc734e716a626295a9ad6cdf..154767233b9aa4f7203aea059be82e81d303f374 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c1edd5d86fbd884d3d66ec02fba71a81bd41ad7936eb48732cecc7e226910d0 -size 715201 +oid sha256:15650398a6fe9b65d5dbd5b9f92594d2c0a9d291c4df8c0ca97b9f095796ddcd +size 679023 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png index 4f2457d6eae87163cb750f84d22811ce8e1428d6..ae0660a4c39d2d3fa7ad52e2faccda602ecabe9f 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c698ce137bf3dae75ce2f6a65900027313d0c6cef01450ee70b5cc6067c260c7 -size 875049 +oid sha256:3ed92598d6c5fa502bcd4de23a64d7e484a021f2e7d1afbf70afb06bad688766 +size 733402 diff --git 
a/images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png index b84c1c51014a87f6b972eccb30eb6f2e59e07b79..d054c467d63e8b22244f8bd27d6c99289675bb73 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10ec36f566b2ea18359ee973bc2d412d66d23f2a53705d3a30bacc7ca9455218 -size 770021 +oid sha256:40be4e50bdbe7a2ac5ec0aa5107557a469deb7bb6ec463cf5d6afb67eb9b1ce8 +size 866837 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png index 5fc5a2fe80f71c0ce8ee4bc0dd1e9c7b948e1cc4..19b590a4ff98063735f394543f70401c57aec952 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b05a148943fffff3af787d3c0c4b98cba003d9bc0688e22589b287061e6f69fe -size 820350 +oid sha256:c49d72fccf64027ee6d927bb6bd6b117e45d6cdc84b9eeb0a56a19303ef6466e +size 836836 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png index 0edc5dfe93b6ef4815ad9c49edc1cac19e483fc9..456a97fb21ef1a0d89c21fd3cfb4196fe353df51 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c59107fc7e731091a846155f24d0822908e45a060e8ac2d3ff011579a079b041 -size 730327 +oid sha256:9ce7b3bd5adac8e405fd31986d80efc8ff4088d2a4943ea64024a7bd94e6ec51 +size 714972 diff --git a/images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png b/images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png index 9144176b5e92752d077156ddb58b2b7db29db987..6279e93b443ea2cbf1e31e386ff970b42f85c1d1 100644 --- a/images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png +++ b/images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e138240708389232bd3aa7842114b8a0350a0e386c4eb246a98f0bbb738a9a8 -size 720637 +oid sha256:5e8c46f69050c67f406c90a04c8e72c14792d4e6c3aa65558eb7f727e35622ff +size 763333 diff --git a/images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png b/images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png index 994fb68921587c786e1f6a0b9e2ec492ec7c3ec7..8afe548a741ac6c98e408fd2c4f6de6c88f2fdfb 100644 --- a/images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png +++ b/images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4874a91414521f0305a52f54aaf68ba00e30182607164cc9fd5a35f62cc410ae -size 805253 +oid sha256:1fafe3ab5a089cc8e7273067b2af922705dcadd9f22e4d02cfefc26ec21e2d5a +size 587366 diff --git 
a/images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png b/images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png index c8fc528af9cfd476572a37baa45a9ede72039b1e..9bdef4c4ae027acccdae7e57183e6067af459297 100644 --- a/images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png +++ b/images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f175b9342d4046c12be79e6c0e6990d64e48a090acc2a8c77a30b017859d24fb -size 888072 +oid sha256:859423020c327b28e404b098348e1f8f2501152064ee84b116f83e424ad28f35 +size 736972 diff --git a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png index d8c60c5cbb5290bfc1fdb42d9ed65faa7b94fdbc..edda069bfa341551a58b86ff990c5bfcc6624cbe 100644 --- a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png +++ b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d57b322d3d042f09a2ecd3dc1e9f8a96ba6457e148e63940a2648e419ced3b7c -size 584352 +oid sha256:1caf9ca34c26e6f15aabcd8fbcdc67fb83196ffd533c1def55d628714f1a6ea9 +size 879500 diff --git a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png index f65ad36e5d9ec607a316a8c8567a67752cc5b145..e61bb5d1f22933e4afd5a9ad12225d574c39ae59 100644 --- a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png +++ b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eddcb4e60b06c582edee1a62c29a11624685a621037e0263a8e273303b2d06a5 -size 359059 +oid sha256:cf0b95dcfcfd5f9406bd9bf78c0d3c816c61bf8984c4573c88b30b224fffc46a +size 565112 diff --git a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png index bc7156aa5254bde133d799e74529461c3a27cc42..06971165432fea30a14d4e2b11bbae1064a58de5 100644 --- a/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png +++ b/images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:035a4c03d20f1b8a9ff66c77dd00fa0c2f8dd0000ad874d29a2c67f17b087c47 -size 1271650 +oid sha256:1d59fbd36773df530815a03641ba11fe334dc532919e6122947ddbc64d66da47 +size 1016860 diff --git a/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png b/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png index caef21a7dd74dc2964e8344cf2e9c3a2f674c0d2..ae0ed4bdc8188cc573b331b8f2dd0a21befba7dd 100644 --- a/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png +++ b/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c310ae0a542135dea615e52a962af514c906277cd6438ae776fe90560382c1c -size 938987 +oid sha256:e5fb09532c0682b8888a4b0acff815a75eeb08542ca2cd717b80a75d0de80118 +size 217501 diff --git 
a/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png b/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png index d3a2fad092d0a5ff69a0f6e10aceeaf130bd7359..072d4656b985d94ebc542afc90bfe04361849e55 100644 --- a/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png +++ b/images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbae5ef2ee406443d74e89334b3bf2dc7c86ece7ddebc02015e8b029b99ee0be -size 991764 +oid sha256:5c0c6503dae97c364d8a5c0f2114a043e60ff4bb930a1f375ec3777baa3a9d7a +size 931648 diff --git a/images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png index a8f46fb01dbf8f75347417d483a3dcc840d66fbe..5b09e8aaab3007788b7d01ebb87e6ec8e13140a9 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be065ad9ff8174f1568648daccf826b793df0b6849f46cacb382c4d4606d3117 -size 952935 +oid sha256:69c6d8fa204b34395a6b4317718a242f370a9043510ddc7ba752cc21765128de +size 899628 diff --git a/images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png index 0c35136dbbe606afe5f2e838302545b68561a068..1cc68d45bc6378286d564e686999da377155020d 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf8a8c9e92c7ec287aef8fd68e4c67c40fedfd83012eaa23cadea2d763e69184 -size 616905 +oid sha256:464907890552ec5401994e478c8ad189d808a81778d1a09dbfa211aca331288a +size 720169 diff --git a/images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png index 422bdf80394b262744317acb4e14d3e02831b17f..eac376c9a2a548a010012062c28f669595aefc5e 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cab60ad79a2658bd151f8f7bc96aee4564e8f5dac2bbe6d4c1532e693b769e3a -size 935013 +oid sha256:c8753c947b34c997793ae515138b2822ea468708fb25259411038dff4affe0cb +size 1085378 diff --git a/images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png index d8cbb10f46d12aa758c94d9050fbba3edd6c7206..82e4b8dec64ab55048b3c855851bed37453596fe 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:543f2b1a769de196f85b11d7a76051c768d822048ca3bf00e5bf555a5d87668a -size 554224 +oid sha256:6f2c3866563efee08e7a15300f83b37c15de7164058de3d83ced8f41e0b9c59f +size 826489 diff --git 
a/images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png index d03a32183c0e905bb40b21adc22215c8835336be..0507f9d33d998636aa24e116476067268430aecc 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf4f7807d05a3b1b4bf0d7dcd945f3bbf44f67ff83b67126e874513e8b8d43aa -size 783554 +oid sha256:a4c80f7e4296e8e3fce7a02c2def4db273395796dcb36984354332c951a6a643 +size 1784203 diff --git a/images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png b/images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png index e119a80d23dba573304d3e4b0460bee96f1abd0d..ac191e1936d368ef1fd4eeb16cde402a20b8543a 100644 --- a/images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png +++ b/images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94bc736b69fd8a3d14620f73711a8e19ee996ada5a8dae64b2a7c93422927560 -size 1220207 +oid sha256:5ad4293765709883d2054ee2b6eede0a62a8b9c729e6285d5d4f886106e6a1f8 +size 1896695 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png index c27d4cc6e51946edbc9983981147b11dfc5b3ade..429253c0c079f3fbf1909914b0fabf5acd003c73 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:106c51545fd8b658ebea523d1ab2d1f1cade90ea2947c2af766ddb62080f4ea6 -size 778803 +oid sha256:87ee4db00848cde227e2cc16f509051015429b0d5b89faec9e4af1b19f68126f +size 502302 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png index 3510404672f0cebaaf20dc4e272c3ec2b0651165..972e5fcf7c9cac337cc81a95553d13aabc1efa6a 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61ac24ebfd6e338fccc1e7d4daff1122241f1e6e1ddaec13b28815d2730bb80f -size 1585385 +oid sha256:ec2f6eecd3e94729fbebe001b0adc6a4bb73cc4be2f4e7ecfa4e7ee7732e90ca +size 1582432 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png index d11efe281cc60c40173eac5c4883e31a7e2a8996..5b2ecea3b6e1d6f3da0e468e307a5c4185829476 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f64b3131896c4d9f173aaa03fe09bdd59575b3c5156423149132018df57ce8ac -size 158905 +oid sha256:c6ba64332dc1ce8cf348bbf32f7068246a2f3d319b6ae9d85a774a3a83a96a67 +size 821040 diff --git 
a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png index 2fb96897bf78c106e52bd3a709f00a266d91b047..d7d3977a2a1d86fe3455da3b806db5bf4976b9f7 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6222e3e712a1ef9bd6d56f1260c74f910fd6a73c32eeea06a4e6bd9b44b61339 -size 155707 +oid sha256:05c5353075d4aa579726d516af8f92419b74cd9ee5ae5494994b13c29088c446 +size 801874 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png index efcd6dfb6a5909d1ef2f311ab2a4332c68addd79..a00638a860a51b29288db95e5f7d218af926c53c 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:812b38776f41f0144aa2216c885d93110b909f56aebd5a55ac3ad82db2fb69bd -size 142836 +oid sha256:e5189b4c379c720d51c5b183c1b2dfbcba1d63a2fc8a2373daaff7438f0a9840 +size 747966 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png index eee8f2aff6fdc3547edb6a1af89a8fb61e6f7ab5..e20c8d51e1fcefcf4cf3bb516c5bf7d61de38d6c 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b96c443da1db9646f4a9754af7b8dde3a592011c4659ff0309d8b576cb7fbcd4 -size 562139 +oid sha256:5642783751a48a8ae802250399d16062d06562551c45fb5e476e19abc5c8339f +size 579265 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png index b707fd8dd5c3d826037dc36ca8c8f62cf5a8afa6..7a51db431dfca68102f1b7ff00d642affb91016c 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2985b6ce31882838aca8bec2e447255d86c89f010e3486167e90943fd0b71a93 -size 159737 +oid sha256:4898053857c313fc7b456843805bd3cdea608c10be86cf3b60eed2dedc95e9ff +size 801481 diff --git a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png index c5a354a33efa77df100c3dd21af489686797ec6c..c6bd1ad0d925246ea5487581d1623cb8dd5f9081 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:908f04cc4738c26a8c8c62778f2d63bd37f78b74989f989018a6a4e8772f8c05 -size 598472 +oid sha256:d49442237d4590bf774bd7bebf3850d867d0d1fb9c608d553a6a678859425012 +size 364314 diff --git 
a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png index 975237ee935de9d4a2361b4337da8683a4389b85..078c9532c86c1aa51a577f7e129aee1d01113eb1 100644 --- a/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png +++ b/images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb84b5da3a5c5e8149733a714558151e2aa3d3d49b29c040479e8a8e53df9e16 -size 583920 +oid sha256:907d13b767d474624210ad0bb325309f493dcdbb726bd7b04bc4e4bc2fe84a87 +size 827826 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png index 1f68db0e0e0b3617943c9481ee2c0c785cf1a243..bbb75976ac199838b5de1738676115c79cdb8ded 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ca56727bb7428b1d839fcf8edd7d77cb22573f41d606158357e65572fec0e1f -size 1308079 +oid sha256:7f22436dc194c0e4a310cbdf96be37d1558c58f331b32d7a677764d28162c83b +size 1396668 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png index 2ab12a78c36e3cc449a48d6fa689e8726f4fbf53..81ea90d67e460ef0eb3b954dec646a8c3fb1f7d0 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a5264a30d6e8f44e15a24bbd95fecc1d3238053a13f09883e38a4646a69d689 -size 1818596 +oid sha256:738e30927ff4ba5a03f2ecb2c16329343978147d07a9b8eb9319e3e0e7f36471 +size 1638891 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png index 129432a6c09137a5aeebb565334295a5ee3e5563..404929d499e2657e318745f43df13fe972c0e015 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c00db82cf5a1350d98631898285637e41fed8cd40db439cdd1176af98cdac6ff -size 1158030 +oid sha256:be5b80ea2bd2b4107d6fd6718bfbbb9eed36069cf5f48d6ec865e9a4c7607589 +size 1763337 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png index 20068c1bbab72075ed14e199f05a8b16c56b4713..5abd4ec5cb54fea6075245c64323ba9b3fa4e224 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b28f77b6e6db5bcfad8a7dbba2b96b4b92e022ac1674a1997661c44c2189609a -size 1974441 +oid sha256:1a262ff091a53b00bb73ffb63a1aee7eaae637aff17ce647ad6a8e8f2ddc22c8 +size 1957765 diff --git 
a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png index 61770d5f2f0958e6beb754b2f9b729ea01cf5dfd..6caa722c0009185573f18989fe374e263c78720c 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18540704e8d3f8733fe2ebfdf0bd6f7c5444d92e811aea91f4b6f195ef7772b8 -size 1304799 +oid sha256:01c3a4c2bc6cd27e1b9c58ce73c0aa1ab526523b5ea9a39969531b639b232396 +size 1210358 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png index 5531a02e68000c9b53a13a64750c6893aa9082e3..7cb342420acff3c00543590493115c67ebebb16a 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25ca8279bf3a690f0bca9ca1973c4ca7b98a696c2f4c1b7beb7b0a31cbbbff13 -size 1660715 +oid sha256:3d8c8b37cc9413854e0098788cde990158aa16301e14c27e2c3c980565b081d7 +size 1911448 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png index dd2101d6db3c31db054fe4d7c3a509139ca2f7d1..e0b4d2fd76f2b7c8c2304469741dc57841d9831f 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec27f32883845dd04e199684db04fb251c097ea8d5ae2e9662fd1eeba4b7cfb2 -size 1059257 +oid sha256:ec76d527b8cd162383a66d0c127098c8361679a78c2a4650b6a55e593fbe17db +size 1253654 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png index af3a565c6f0773ea5ee1027e69cc127c34013b73..048584280812fa2bad0a8ddc20c6bbac9365ab89 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd690aca59d49763187c3e8167a83c46d8cc8056cd6c1493705ec39396349ef7 -size 1727180 +oid sha256:9401b7e2bbac1625f658353e1a703fb97c34b2863f1e9c3a5940a2e01c7a08fe +size 1096301 diff --git a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png index 8761858f0a675eb85d54f05f768ae04b642cca26..65805b7ca41fc73a0d34de3df9e0282ad0b519d9 100644 --- a/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png +++ b/images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3af92e0b3b6130797587f2b6fae48db8f39ee5479a614ca8ee496126379df22e -size 1036287 +oid sha256:67f45ab61ac6b908acd91fb24b48f86d5ca85ea47bc746cc7741c00242f5998d +size 1354820 diff --git 
a/images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png b/images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png index f3145f89015186c30c374dee04e6080445dd0eef..c25268ee1831a2ee4e193db1aa661adf42f75cca 100644 --- a/images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png +++ b/images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b9c2ac3bd0f36cb7cbe870df80cbf4c92107037db17c510d88a9ad94126dc7b -size 1960237 +oid sha256:c6c0e7638b001e97864745b432efdb7b473a7c09777a438db9b712f6965ffb94 +size 1973203 diff --git a/images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png b/images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png index eb36032116de6715413c7814301c835d39f43402..8f4c55c4baa8b58f6a9c81fa0fe806f8d626d1d8 100644 --- a/images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png +++ b/images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3fd50a0c21ca89b4b025e6b06175e6f7239dfff683b07cbb0bbc51f6da32d22 -size 1934454 +oid sha256:148f0ba1fb4f327dffe5778236c6c9d0bbea75c9fe0bf066b0b81834516e59fa +size 1941991 diff --git a/images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png b/images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png index 8391d89983b9f32e1ba13c333b7f8b3ed73e1baf..301a2aafdf043961745011221b3da3c8a4f9de19 100644 --- a/images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png +++ b/images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef2b09169851aa45bcf2fd3e32cae5efbe9f72d560fd919569cb912b2b7ab839 -size 1251911 +oid sha256:eb78faae46be087aa61af9c249d00e0853eb764ebec30652e95fef8d8c4fb403 +size 1187424 diff --git a/images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png b/images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png index 979a90877a6bf66d618c2e034eb575057e8ed055..2cedd5a26b37d4b38d15c92101ea50ce11b3b5cf 100644 --- a/images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png +++ b/images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2540a4d531ab0d350c82a086377e76ac929d344812ab376f5af03b12729282b -size 1784535 +oid sha256:99df7c35267390ea310ce3ca75b8700d1ae013a9a7568ee138468db306b4b0c4 +size 1980144 diff --git a/images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png b/images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png index be35596fc693aba88d423e74fa2fe86afc8cca50..f16b07dcf8c7456b99a73f8559157795937fb365 100644 --- a/images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png +++ b/images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f8cbf850bdb53cc7f66a1f4be356c09183851bc933bf7f4b25a3ab85876532b -size 274688 +oid sha256:ddac358e0b14801c8ff3df9938068c49ec93fc8041d64a686413930bc8280ad9 +size 274747 diff --git 
a/images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png b/images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png index 197e4afdc2a31a5ef1467ba75238b6d1e04dcb3f..19e4a8403c6c9b3fec927daf2d00e66ae6779d88 100644 --- a/images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png +++ b/images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9726baf6f2b08580e1b8849de9b40a87b21de4a1569e5877598498cdb4d4e95 -size 1024863 +oid sha256:1211e16392d27bb05c55715841cf69b2ded064f84beb2cc54eaacd83614df443 +size 1527891 diff --git a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png index 8abb783b424529a17514f99757831ff3742ac507..5980c69728d1d07008a4c04c2c366577e646616b 100644 --- a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png +++ b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:714f004a9bc65255a5796b6305ad75b895fae38b59b825b2a28fa55e71402b71 -size 349968 +oid sha256:8919c49aabcfe131f761bf584e5550bd10445885520b787b881123481a184d2b +size 600283 diff --git a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png index 7ea4df2fbf223eb6b45d9223012d1e6bc2ffea54..733f2afbe450ed323e8ddd0708bc361b6e6999b6 100644 --- a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png +++ b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44cf801e6e33a3556e06384d59b27b02617e419e7398c09772cc2cfc3584f0b3 -size 1017476 +oid sha256:f5dc98f530c7ed155967f2d557dee07b1709b346b272d774cccce42f7f7bd98e +size 1386447 diff --git a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png index fbca193aa0f16ce14f907acac03c2daf8b49d7df..e9353656f4eac4f7dd2f8046ec35c2752cfa37db 100644 --- a/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png +++ b/images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebf0232ad1971b3fcf292c2e705bc5370fc0a139f575776e3a35a378f8c0d998 -size 972979 +oid sha256:0f6e63c97300bb8ec2b3c2134b6815a5c74b0f67816847b9235182c57f3b823d +size 1140349 diff --git a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png index cc137d6ac22f51b3cb7c92d04d715c7c2f918dc8..d98eec3ca783c18cd895604afb72045692f29928 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1c4062f0f0053aa290964b7ea8db7a1d5efe22898c79255b8052c902b4fad5f -size 1206274 +oid sha256:fb6eb4b16fe998ce79c7da56f8c0d599e4ca01f611675aa4ecd06000a4d2468b +size 896622 diff --git 
a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png index 9595151076abddce24170e740b07ae4c6041199c..ea5ca6704d4d0e520f697dae1f918ae2c41d10ac 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:057c7b363135f2e789de5d47636181c2d8dd66dfc194f3b6ec318c3156f1acae -size 1228091 +oid sha256:7c25f79f700c6f9653ad9b8a6372e603292b4fe28be2ee0c90081311872020a7 +size 1264149 diff --git a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png index 082f5d3d11e9dab23a43a9b0590976028885e339..f7667c7842752d15b80f94251dd2509456b75809 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ef17f85ecc7a05cc2973b127ab12ead82cf10a702d26169780f5a7947fd7508 -size 830077 +oid sha256:ff4148587be66c22c5edeccb114af6d24fcbb938010c162718dc024c9e321289 +size 947679 diff --git a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png index ed1a4d43bd0d002ceadd4aa9711a0b25f63d6539..d1a2c1db736ba76cd1f269abecf549e00c8d4252 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ecacdd4aa0d755ad2f050713ccf2b5a89b0b8ec36282d0ddc432556160465c13 -size 869516 +oid sha256:a196a55fc2fa8990f5e0ac2127eb6cc5dbf664f0e5e0c9ecf9f9cdcd62db6765 +size 679052 diff --git a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png index 953820d31ae2c69b382a8dad44807125e304dd62..eaf643ac8fb55ad3fc4236bb852dd4c2958bf6e4 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0df9c7908fd88af761fba2bbd4ba83cf2812678370c3651c324c379079139262 -size 823875 +oid sha256:535c50a1b0ee7c18537f772cc8c7a30dba2e161743ced6f33629a1cca21acff6 +size 1245887 diff --git a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png index 7186549bc8027e6bc718f515817dc16834220d83..26fe1bc85579c067f1e9a8f081e6d82a0cd48c62 100644 --- a/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png +++ b/images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:565a02fb5e97e0d684d2547f36f458a3ebdef58362919ec3b18c0f9c3dc7b2a0 -size 826726 +oid sha256:4e317842457a158a53d4dbc73fc826c61c189fb1e47636f0f32425435e02b40e +size 1132014 diff --git 
a/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png b/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png index fd38ae188425b42a98bcf483fbedca60db802be2..e8c8735c7b8931250b3aa074328690f1f517e2b4 100644 --- a/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png +++ b/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97100acc99e49a0735dc802dcc585840117f80c5c01edd4a9690771543195902 -size 679128 +oid sha256:0ce26ef3a515f1dcdcfc1e344378750b535c231350a086e0eba888b27ba693bf +size 325898 diff --git a/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png b/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png index 153ddd96dcb818f80702d3f1ff036d717391a28a..aafbc083771a8f4f66adcd50f5cda84d3b9e0122 100644 --- a/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png +++ b/images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:721cffd1bfaeb450adf94958f96692f06ae717952e6649985c9b5003e9cde620 -size 977082 +oid sha256:ff00babd4407cfe244564241253d9a1206aa0ed8e8485993f6b4947e39d0148a +size 1142151 diff --git a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png index 9e435f490e6ea4f7e244fd1cfb9aa37951585ea9..bc05def509847c5f9ba64472e3973030eceeb74f 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56ba5ef3295422b471a210697de05c4847b30e28f6787a9bc01cc419de165da0 -size 1237921 +oid sha256:55aec5de90ea98836515c8027733fa159b2153b022fececfc31483a777c4439f +size 1215157 diff --git a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png index bfd363b9d27dd79449748143900e8947d2c023eb..29faf37c2084527d6782af1f4674690ad18930a3 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93daf38bc412b8b013be60896c68e068f4e82a5845e889e254a13ad47981d574 -size 1060375 +oid sha256:bba79cabf64bd5fce950b50bdfd27d9b5eb2a1bdca1e8ca105b0af7e45e60485 +size 473977 diff --git a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png index 6835dff9cdfb9803a0d77696fb555a1c144b7b13..1a3f2334dea059166b1c515f8e8e86273ff28d32 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5cd795aa927bf3c511adeb9dd45bb4b505283606a9b55e87188d2e0fc1de72e -size 1342825 +oid sha256:c35b37ac15aa132986d4a20bef01d417ec658bd6de7fa0a58e21af9398dae893 +size 984316 diff --git 
a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png index 98b7ae5538c5650f6ef4d2eae188adfecb6ba363..86b867b8797bab671858db8934a53978e9d708d2 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b14cc9f6243de2c292f52103a2b0a841690713561d5ec065c2e3b49613a3bbeb -size 765659 +oid sha256:9c6d7d866dc22b904b21018a6d877e6165210023523d19c520451b970de5fd83 +size 669695 diff --git a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png index 59dcd572fd5001f0a0813cdfb8a3ffe68d1d58a7..8d1d2119c0762d5c4c247c2f3c1838cc718d22aa 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2df40c53b9711bf0272398ed0e7c09da12c4e7c27f8e5165c31092c086ed84d3 -size 1383403 +oid sha256:c028b23ee8b75b0544b5770e7d855b385f8d53fddcb6392d5507b6f89d7b6642 +size 1402267 diff --git a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png index 9780c80b0d3ccc7d27ef2a810df780214340e170..ecf6a5edc56390f3212099754f5b875f5af2e339 100644 --- a/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png +++ b/images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a4d2bc32d7f3dfe98defa6fa236985574995aa2e09e304bb8f6be0ab01b8a93 -size 777695 +oid sha256:997a63b4ec30004d1b58bbeb0a32b2c86409c9039d26fc805c2d7afe2d93aac6 +size 906080 diff --git a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png index 81127d962f115b7a73da54d84784dc657095d361..ade2794445f14b3c806c8eeaa31d54e695af9376 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bc4f3ca290225d33265780f569a60af25240cb28d16d5d9c0a748baf64a7a5a -size 2028620 +oid sha256:e8a18acf62e1f44312dd69e393a0cb731ede1e5fee0612a5e050e83ad16b8d66 +size 1823932 diff --git a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png index 04b7ce4444c8d78119c8c1e5201881ef1f663cea..f5a7209ec80636bfaaeb7fd9d8480354cba9b5da 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23b00d1c7bcf5cbc0b06863015cbe311183d2f0638e1d8dae0cd433af7f3b575 -size 1663294 +oid sha256:7b33fdfe4dcd5ca41cf2174ffd029ca9d384eef3f9551d4cf5d74f0efee3bbc7 +size 1568900 diff --git 
a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png index 43481278ff545f4fec767ec1ea9396f576c29b6f..f69f142476ee45f4a267d2cecd8eb834beb8dda5 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae06f94ab334d04fdac8d64efe8f76be11c1b3b1f77256662a7f83f830d8bebf -size 1876678 +oid sha256:cf8ae92381b70fd7831780cce41760da8a13d6d0a742af1e5223bb0410ff5c24 +size 1616122 diff --git a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png index 04b7ce4444c8d78119c8c1e5201881ef1f663cea..26bc12b968d423799f5a870c75a37f746123e4ba 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23b00d1c7bcf5cbc0b06863015cbe311183d2f0638e1d8dae0cd433af7f3b575 -size 1663294 +oid sha256:1cacd5c6ba7a3d417e48307e7a8df7fafe9f446c754d2feeddc2ab258dfb154b +size 3281781 diff --git a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png index ba25608d7d8c625d4817034b50c3d70ca3d0a233..7e22ca531c87551e9dd676d3f78f3ce6d474ba92 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37fedac9a28a9bdd0de3dc9ebd44393af19748868e0ba1c65e3666a6fdfbc0de -size 1859400 +oid sha256:0c3e9b88b6a176494ced96827a235298512164202b509056784696e472824212 +size 1682987 diff --git a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png index 6489844963ed68e8a670cc48e61a6e00aa0c3ca2..f21f6bce1635c0d1c44cbd1c347ac5a6f00e631e 100644 --- a/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png +++ b/images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7109a9293bbe20659709d7093e91696baebf93e780d61e92519f9695594afad1 -size 1905285 +oid sha256:c11f852f968217fe40f5886368eb9641e967fc3a117040a643a3e14fbb05fa03 +size 1935516 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png index cb1671264b460db94dbf6c792e550843d6b4a232..567873eba6f22443142b2ac7a5692095d37adbe5 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99e309c8800209c1d6af86fea04de7b17f5418270065e29d79a6efe61d6a9f1e -size 943292 +oid sha256:8f0c5743912306f8c87217e6a2a8eb9cb73a7c6ecd1076c5fc900bb277165ca0 +size 1299485 diff --git 
a/images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png index ff8e0f4e58018792f370941a35c70cea3c1c17aa..5a5a3cdb02f3f53695b84cbc9286a61487405305 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8851c2e632054345ea686d006a991e92b0eb644b7213877f8dd6af8dde3b1da0 -size 915790 +oid sha256:472385627177247d14c7f9232325c158c79df21756aa96fe88d4393d5a9f8012 +size 1333056 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png index 9bb423efa7b4c201b90018857acc950a88b43e23..fc3c7e9b15187a1593f98a18872ef29cf0fe95cc 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5802868ad0c2dcf1f354d35b95a3578ef8199c682f98dd5eac03c85e462e2f95 -size 959651 +oid sha256:8c6677c913bf0a7cf5d07082c9cbca30fc2fc4a3577fa132f16e50bae904f4a0 +size 915779 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png index fd66848481e5b78365d46e0c2ccdaee5e2580220..c365cae02788e0bbac2063f9e8accadfb0f6dbaa 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:465f337b6c49d2a7167a24c468576557905871d3cded54cfa13abcccf0a5f142 -size 2049010 +oid sha256:52e71f45a09109eaca335549f81a988a48c76a963e3ee69350c2615f7ed985c8 +size 657606 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png index ecff52e5437e8ebb7f2adac47f991a0c34c1ffab..96a9fa41a5eabed56067023949d8ff19b57f319f 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e07dd0140e63c6bee57624b9bdfc9728b6ba5bf6592eaba26bf824f0796a01c9 -size 587124 +oid sha256:23a81472f97116b46165bd9a7a92292b82dc7136d3dffb771ab0dd3dad8da485 +size 1116444 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png index 92607b1589c4eab4acafaa6a166840f3044a33c8..7f076bf214680af1561be43ffa30f75f6bab830b 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acb611e041c13bff1506b4cfe49280ec2e5c47be4f2b1bcde32bdbebd986de12 -size 886057 +oid sha256:2660fab180c5d923cceb3e7e74345d19e34682aee3ba69963fc45b0d5ff5c914 +size 947945 diff --git 
a/images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png index cea2485d188d40ac30dd70c5f667b2a97db904ed..fda6e0f8c021e9c4368ab87a9f814adc3ef7ce42 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91d4235facd570fa0786784478270e917dd2a2b94e7f4bd63a3e9352565e0c2b -size 897088 +oid sha256:19eaed53988bebeaddd8ac2f3317c76a7bd8fa71402b53006ec8223250b9ec03 +size 1296164 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png index 10de3aa39cc69dd7f0da77d4d112cb8668b0d367..7614b1d2c97566f853b8bf661c29e87462f1efb5 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:323def062c361762e3058937cd99af9747124102764038e3716e4ccac2b7de28 -size 898020 +oid sha256:2808bfbad6c69ef190537a6a6894271844744dea0c9efb42ffa5ae7e7195aadc +size 1315061 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png index 0bbd0ee7d237724b07f5045dc9739a3a57a6ec92..bdac8d7f52d61905e042d2580aff474a6dce2c3c 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af91644806df3819b3be0e9d4328e0150161cdbce17845209f364b5fb173d9b3 -size 646455 +oid sha256:6f90a02aca0c734ffbd5d2981684a71f052f50676a64bf55f6109ece74b3e2a3 +size 774130 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png index a52a7f0b83401df1d6e0c543491efeb264f3f9db..506da57d150814fbbd22ef495f8d89fc9983185e 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7cc4ea72fe574b6142b0424f4dcde8d0886f0b0219eb67b9cf5481921e123ba5 -size 898703 +oid sha256:b54a008ca55cd1eddf1b4edf0e0a1c212357ab5586e4dc9b31da11b8f4d937e9 +size 1018646 diff --git a/images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png index 8b1293526c2aa3cf0bfa5eeb820f7ac4d5b9fc9f..491b73eee2a72e05296fb5444803d5220a9aa9a0 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aea427e84a7c66473708d6fac25cf9521dbb96386e28c2f1ca665ee3d566312c -size 2026835 +oid sha256:9d8942b9a411a493a3bd6f0af41e7eeba15eb3f5341866c20533a172fc595b21 +size 801379 diff --git 
a/images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png b/images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png index b150c026d8bb369665160c3be2836bcb11788d18..ee17039908a2a94e0422e5656684231ee4f00052 100644 --- a/images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png +++ b/images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:783d1a0eb0260a20a06e32b33468696932f84611f1d00b988de869fd70e9ad1a -size 891219 +oid sha256:273b63b5fae310fe3651a14f0a00b3606437acd929b6b48247276b1a91986aa8 +size 1292117 diff --git a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png index 88666f64164225d46a29cc9015636d61e5182a04..87c5b017f29b0acb4338620f2a2c4e1437044752 100644 --- a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png +++ b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb6c087344a0b3fa93e99ce1c5b54bb2266975407cc01f77b2555d14bda0dc1d -size 1046416 +oid sha256:d0ff49f9ff61e61d6bbc08fee5867af0a68bfdf695f8c551934b39f3ecad313b +size 1544482 diff --git a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png index d2154d98559d262d072323b601483b18464920aa..950144f107096b4bb6c19b65e697ef86aa1e5c3c 100644 --- a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png +++ b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:996573f5717c628b527022438a1d58f52af3d00285a16d8c5645b3a28e5de4d6 -size 1058228 +oid sha256:441ab14ffd9ff167f7d52ce56b5fee4651b2cf19be2e31d48bb7081f3a7b9716 +size 1925571 diff --git a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png index 9c44c0d408cdf873e2ea7a00b144048a97a358e3..6d0cf594cd9befd9db33fbd139efadbf0fc20715 100644 --- a/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png +++ b/images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5baf21f3870100bd18392064b065fd943f822472a1adcdadc89500d3a9ba99ee -size 1031042 +oid sha256:bfb7e63f41c0becc969bb22bdea60d8fe6fb4d797df517ef474a871bdec15be6 +size 1655628 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png index 6d383f4b5750f6a506d626b06266f8910371aeea..89056d2d341f45058d13531a2f69262627c9b8bf 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c605af8d4e3acb92a4e19dd1abe226467593c8b905829ee9a6d800236910c815 -size 1690134 +oid sha256:2440aa206553ef0f1384d96a4a525c0dc83b71afea86dee2d57dd27f9de64371 +size 1682680 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png index db9bf809f4e230723dc588866270110bfdfe9071..1bdfc0c3f37764ddab34c79fe6f3112e33849c84 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2799c61407a386818e14192f775f0e97a99fc969dfba0561623fceda425ce8d8 -size 1689819 +oid sha256:2c44e845969914d96c646b0de13cbd7e44757b66cd2436a75d9b1668e7113de2 +size 1663353 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png index 755f89c991a5097c3754430e5ee9ae44667d58f6..d0bbfe32c13f857d1f1b260677c8d13a9e2bd69f 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9aaa37e15782fef8c1cdb2418662106327eaee37812a51f417e6e547f9038c90 -size 1315974 +oid sha256:4ad2f411d9c9468c390cefb5357d3aff0ff7746a6d6a16a04e232d083d1adede +size 767048 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png index 3c4b19a328987e7542a005a339de44a530ff091c..fb17c4bb07efba31e6c32ca99af335117ee4ccb6 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d53c4e62c312f4b97f37596afd170ff18cfaacd3d40567ca90966799867001b -size 648295 +oid sha256:0700cc66c10c4c6ac969051a856685c55895a0ca0db8ae3f4cabb292c8e49329 +size 708883 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png index 1c36e12b016b017a77870998b14adb587080346c..21f0fb8b35edd6df940f497d7dd38f393c61d029 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a30d7dc799357e3d6c79753a02c1cc3b41c80a03ff8e37ebe1f7c8482a23b9b7 -size 1661173 +oid sha256:08e4da6d15fbde68f388ea393a2c2e1c732563fc51eab7f96f513bf8a541057d +size 1631482 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png index 7f6e4da99214345aa24722019162505b6a6250b2..460975bbf82a02e5a4b7bb3864713aa8320b4540 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:749f746845e824078f87b16265b139265312d822806ac48417242670cfa5ad3a -size 1660947 +oid sha256:1f733180bc06195f2395f5ef84f2812e7dc6f8245ff66f94388bee8deb4bc494 +size 2420665 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png index 2f5a09d8df58a8f01c38f35580944d7524102a6a..5d915b2cc2c0da08a0bef5d13fb0d8931b40651c 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02257e1a4ced3ad240c08915475794bdad016e371f6f56c7f4af53ff19b68175 -size 2224328 +oid sha256:5ad645ca423690f0e9199a9aac1a88535406ca72d9fa9edbcdd9e11d4da253a6 +size 1108129 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png index c7614e4f26ef401ac19d07b03ff8f2810cdfc045..41a39b52cd64c8f914703acf19770302598453d2 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d358ea6d36e9fe598516cab98cad5eee1fb987e82fca945699ad2657bf3f3f5b -size 669386 +oid sha256:10ed58df1a3509656c61745e5933991533089c19c757ace2850a17d6bb676491 +size 920947 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png index 9f5051e1f426cbb96e3e39b799fda2f90f3cdcb3..84d01da6db60318bda98a8092918f7af74a7cd46 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61c4c68f25d38307bf57bc6505f61da76537666a34af4cefdc01416a841fa99e -size 633917 +oid sha256:0ee9cc2f973109651fcf8c24d9944b72cd269aa9d1f9ca2fc301e34b9481898a +size 859911 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png index c8605431821dba51c353f8a90c0860c57dafbfed..8f7e21f2ad4522a925e22c809916c0624e0bd36a 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09c2d3df6104fd948574d98ea01d3e6256ecc1a6e81457610b05ec56de53b92a -size 644766 +oid sha256:5f12d851d72fbc774a63d54fcbb0b726c8aae565ce962504be2ee9ab666eac84 +size 910076 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png index 093d4516fbd9b05f5633c7e40f5c148ca6381e4f..7d2ac52c3a3f7087486fdd7fe947cb3e01165310 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0841c14bad0da881a3e4b18a654be199e6c0100a5f6b4050b370f41836a931da -size 1431006 +oid sha256:fc1e85524b707c357d60259c8674bc3a57cd769d48bbb4400f0da48ea970f2bd +size 957583 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png index 1fc889833111c275a27493fa22bbb791f1081e34..0f7751c5bddda4a9bd2325b14792ea0c9858bdb4 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78bdacf13e59e55482fe66606803aa03b9f9f1865dd50e61d3451d3107842a90 -size 2764984 +oid sha256:ccb991223caa2c0c4b1bc122554c21e66cca0e61f0cc09f888c42af3fc4c57bb +size 1353793 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png index 2dfbb34dba3cef1f67148aeeef6d5fcf34b43685..9c8f67dbde9573100444f496f113409b6ba97458 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b31792bf1bf7cec02b64fb642cb9e1f6aa6262b8ef902fb70de25bbc8c6fcc8 -size 1191233 +oid sha256:4dd1def9310c8c86f0edd7f52060bd35b5765bc3b43a38f6f8dd8d9b9f40d657 +size 847076 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png index 7309388049942f7993dd8639a8f7602d9e670cf3..96b8389e4fbc0c8b31ddbaa018b2d51789b61709 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16b16dfa9a9e2391c9efa46107a9b50955d17e034799d214fad5ceb36c9e03f8 -size 704768 +oid sha256:861cbdcfceb31de3e3ac4ae3006ae5f6ed4e1cdd9d84ae7a4eb52b5933bb83b6 +size 615178 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png index 6fcb419f3274ab44940c14f872873cbe2b997cd9..e29b67a3da48471e2907f6b5f60cfac7dad43f82 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:128a69948529943054fc7515ec8f6784212067d92effb310c91a9f71fe45ffc7 -size 1425769 +oid sha256:1d2bfffdc79d487ea7b4301f54595d0786cc57ebfb315a15714b439a8ccc2607 +size 2386098 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png index 9dedd053684cf837cce56e2dbfe46c1c12c1b538..f8272f42d7c3cd829f5d66b4735348f86598cbb7 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4220ab2616989ec9f0dc50aa339b761aa507eba412a7a9c2c4ef1e8ea2e94c88 -size 1421481 +oid sha256:a3fb425b9ad5d6e93bb55f1cb2902dc2e6ae90f9b2d96cea841ce59ff68a13b7 +size 2286282 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png index 30b3838c81f8aa20ba4433b6150965868ce05edf..f6a3f97dbb8256f26c1e04ba8e98607ff3dafbc6 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8e39e516246c95ed76a5061c6bbd9943c1cdfc2d30c48c5ae9b54549924c451 -size 141268 +oid sha256:ae64672f50301edcc6a9dc7f367d9dff6eb57a3cc48402d9f9776c8b9f75c81b +size 135482 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png index fbd0aa390e2c6519dc40b316506f3f0c81a0f1f5..30152324bbd8904f9c9a5e4ecb58c48cb0d956eb 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e6af1ab90b83027b93fea30570550fa210327cebc9c158beb24e5f2edd5d65e -size 1713997 +oid sha256:c0c026ebfeb267ad7462cc723fc68b14bbf80067eb81671c7d0a26be9fb509da +size 1508204 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png index 417cb66e55cbaefafc6c54d2e426ab36f561c21e..96d00905f2c0f72cddc942707c3f82b38abfa4fc 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ead9cc17caa9d88127890483e69752b48c1dbb70a4c4780d1c7feae42f0557b1 -size 1690058 +oid sha256:8a64c2204003245ce91e0c0574e298b1025941bdec8a9713950d1535e7911e51 +size 2868944 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png index 67529ca8b2e2d7445e670488b3c344fd44bbeff2..2f3ca0b1551aadf9a8ad2368dd046fe566dab0e2 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b80783420964c8eaf61c896312c8772dc41d0cbc8343865aa92618bdb40992c9 -size 2952196 +oid sha256:1f00ea0ab27e99c8e2d738b4c986c9aef11ec20fcf779d445084ddcee1a2ee43 +size 2437859 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png index bb67c25e368ba3e95a74b764d02aca4104125c1f..ca605e21dc3c73198de905e9ec964a9efc467516 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc1974b5490abefba683d9d9fc533e1e95f9a692d04e5183329866f1159a7657 -size 555482 +oid sha256:203d0d5f238eede5a51c9ce17aac6b09e27a838b916cc95a90b91958c0620388 +size 729633 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png index 84ef53f66f99fc7695dd74dfd92852e508796c65..0afb7a1b67661b04187d929d200da68427117bac 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e0a5e6ab2a911d3e121bbc112798b5d9662569a4b4db1e5e19f666df6dbc217 -size 142576 +oid sha256:3fda6286caf4af491f8beff921c2ab28092d19db630783df0feddec08b37a1a2 +size 146094 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png index 4016008643dbca91185031df5054e4f9b55b8cb7..ff733096473308cb31f5a42cf8c0d58b1fa55948 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd50aa504dc413653b198708a9a95fb2ad7f815a06b09d517fd577c8dd6ea0cc -size 686501 +oid sha256:ed973fe8d17f128801a8a04668f39a7bcd7be845dfc5d589a8e2686a198487bf +size 233627 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png index e18556d959282905698b23328c89b73e536167c7..3a2ddb945d436e72a7661d54cf3b973d617c21b2 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5331b11a15ca978f008edb432abcfce31b6df095b1c6b85abb664d0d11e68427 -size 1661219 +oid sha256:fd52398c49a6e01ef3c76b667129344484e34a29a5766ec99d7ca93f87a3dc0e +size 1962395 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png index 013dc7c89bc7d76c608ee24da4d9e046f23e81f2..6f28007fb49bc68fb185383c03f076eac89f09cb 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f14f9db7b716a25243dea7aabae2d68ed61ff0ab0fa96e02ed300ff060b295c -size 1663171 +oid sha256:5224c68554484e9e29444a43bc38dc8b8a209cd5dee78cbf7a23755a4a1d1546 +size 1891733 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png index 85fea0349eda313f9360d27452486454cd1a9323..e537c1f3d0318a9c6c7db9c982b9a3bc051987eb 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c94419408e42c50b4d0229225fc8acfa0d629e11a13a5c3f06fe095d4afcd726 -size 2764979 +oid sha256:a5299fdf3df028a3b9017289b1137bcf47d4b17cef2512995cbdbe1011eec169 +size 2819090 diff --git 
a/images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png index 39d2c597f97ba57d7aee466d813fd2a4a4392ab1..1a9a642b456c98bd7e8b869ecbb4cfdeec990892 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38340f432727aae0c4b26ba31427bebb611f703ebf3ed293e4c98b96c1766c20 -size 1661957 +oid sha256:44dc07743f011f355b91678fc9b7a98b0d2344feaa687a474ea35c27a5119587 +size 2490997 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png index 1eac8ce314aa271667e423edf92e24454134775a..b3fd7ccd4499881dc3fc85dc3e1bec45d8caf48e 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64447edf7386e8e296b238d7d2238755f47ae6b26a953cc70a1cb45bfcd66847 -size 757669 +oid sha256:35952d02c792c4cf260623d80b6f3a0141ec8b4cdbf7c624ad7e66c885b27f49 +size 689237 diff --git a/images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png b/images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png index 72492539166b099a65a1382c6e55516fda647083..b5da116db762f7d7b78c43a5143bb692be3517b6 100644 --- a/images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png +++ b/images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31eeb2a3e1890dd7ef4a0060303f91774f9044e326ce1b84bd7063f081d76e5a -size 1395558 +oid sha256:49aeeb936277b4eaaa4d939faffa5348b1445e5dac1c9d7ba0fa06146ec5f80a +size 665728 diff --git a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png index 599ca6392e126324c04a4407a73614fa2c9f7faa..22f26173048d5b88d4054c3276ca369b7ad3b03d 100644 --- a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png +++ b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba725a2acc44eec6d98b5db05b09ee416455dcfede4a5dffc9ff325de7d33cdd -size 1457246 +oid sha256:aac7980f4a41179678452dda0f55eb17a61567e6165c0abb73af0c2c57eff33f +size 1895731 diff --git a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png index a706205c4d537397824bf35c1eda5c507c2af6c0..153d6ed8ec9edd4cfd3e596d840b7588b14e9752 100644 --- a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png +++ b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79caa593aeed779b5fcdae240565b97836fb8f76245b8f23964f0affa36a27f8 -size 2960331 +oid sha256:62fd364fe86b8af86fceb6e361bec2c17aed578491ba41387028f935dcbe67be +size 1388783 diff --git 
a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png index 9fa7403df506367d0f0b004355106d86bd84a84d..19fdd4e630f923359eba2c260428a89ffe415f3a 100644 --- a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png +++ b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1779a02e44e9daf3473d62452dccb49247234724b99e5f73d7264ed2b4efef38 -size 1966330 +oid sha256:1d7dadb88b26adc7dff532b0c28ed56a5c259f6d377ba8c82a2292bdce3dbf0a +size 1717027 diff --git a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png index 450233362fd3fcf181ff7602e0336d6038418063..6916d8cfef9c5bb75db90660d641db6db9bf843c 100644 --- a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png +++ b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd5021038be8195ff3f841d3fc84546d34d45d629db3e2fc30aa1a75d3c2d7df -size 2165730 +oid sha256:ea8c273589178f3b0fd955907b74c4e7d2fd88bf03d7a490325b5b295974f982 +size 2722712 diff --git a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png index da4b83121c7154cc8f985015b26a02a3605309d0..71d040e0c9a151a61899a7c9a3a3223bc5ae1a78 100644 --- a/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png +++ b/images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6454710a69d297c1e7a845ea5c87a560d6c7facf3fc7ebc8607bb92d21c4fe9c -size 1656233 +oid sha256:9ce32267d6949a77a7ebc1b56000dd5424a2565d32a46a7007a32978bbd80980 +size 1949240 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png index 86757fda5a838d28bf785742457995deccb9ab27..22c1fec5c0457c6fc99bd9b99bf855496a1e4c21 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba5fd034e238a192e1f9310eb558a518d6782baa04d2bf181b1d9d9391667630 -size 1448365 +oid sha256:e034ba3fc1e5dd2dda57bf17795f42b4df26e568250c83bcb2db5c84bb076e24 +size 1899861 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png index 7523ed5bd895cc20840f92c43aefabe298a5ee3b..88411f399f4ab09b3dd1bdf454a10bac160400fd 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fbeaf76fd07627cd519022659f9c3ef120caecc9291e961cd3a14cbcd6a3560 -size 1605049 +oid sha256:266281b9da80186117289b79cdb66f13e7b7f7610dd5134aeb223942bfa6659e +size 1821446 diff --git 
a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png index c594fead69ba5e5bcf79dbd21af3c0ecf17cf666..f9490b503b0115dece6660d537790e6b3efab825 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a70db0d0509e1da031abf6e37469740a5acffe18ae863c74ece7572a7748929 -size 1210817 +oid sha256:e357c1b30b7e2e4b540a2e1e673df3a23e645f6b61935ade7039a76fb3800cc3 +size 1906791 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png index 847f831cf1df0f9590d97627fe354742cfaddb01..e9c85fe97cf00b68d2d0a6e406b5155b79e032f6 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a12b74b448ddd3fb9d07921149d1b6263c5d808ef0933d54b77dc821bb7b4701 -size 433421 +oid sha256:3272add3632982fcfa2b5ecf992f9cc5d656a9a91fe046de4105c4b28efe128d +size 431446 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png index 8c5d0610adb9e08acb65609731ecfaef2d833aaf..884132f7e9aa00a276f16b5cb993d2100477bac8 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6946b7a211afed1d7b3a2d12b26c8999c25e871b074cde03daeacd25f65ca8b -size 425524 +oid sha256:f6d0785281b3374f060314c9e5afc7acf15715fd6946f5c3349da0195b7a57b3 +size 632139 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png index 6f9b23dc59b08483c5f634478f878be149e56d1a..3101244fc473a12e93cde052256871a99b42a5c1 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bc7d1c32c09e0c39f81911642df69af67e4fb5b62a30d4b4bed4e78e9ff59f1 -size 451198 +oid sha256:112a640f3125d49bd6e96fde6c50008f2123a454d1d5b047c6383002914dc45b +size 696317 diff --git a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png index 32df7dafc779a5948abd17eab081c64cff14ef63..4e6e5278aee36c2159cb5441fc0b0e86b234aa25 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1924aad96323f727f8a21db14321e34e0eb694eb45f6121bc54f83b96dafdd19 -size 1608454 +oid sha256:f69d7f7bc124b793bf8674e8845c658b87a7ffbd25ed3b6a866f193c733729e5 +size 1606902 diff --git 
a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png index d88ee6f14cb76e476063495b7aaeb7d85a047c98..94ff075993411d5ce1ebd30eaee0bacd7c8ed7f0 100644 --- a/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png +++ b/images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5d3470bca0ced7b507def693f2b8156fad158ffd31efb09c3dfa0ce740aee22 -size 434136 +oid sha256:f4e9721e598381de24fe69aac7950213b8fb3cb987d5a2381101cf31874ec83f +size 363434 diff --git a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png index 4cd9c5c3830d06a565f1cd50b2a35cb72d9a5374..e47ca960f58f96fc0ac36c13c6ae4ad7712fa7f1 100644 --- a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png +++ b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d051dce57519cadb80d873c8adcf985b3229c8b3d3bdcef96b340d54ce53cd11 -size 3355554 +oid sha256:bf77f28555b43c267f39eec728227f4be038ac502d4fef49c5014356293e2576 +size 2698425 diff --git a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png index 238912faecc91b512c1b2946c090f5f2b699752d..24d89d9107b4cd24f0d900d7d3e1e085a5e99db1 100644 --- a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png +++ b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:554e4d33593cced12168754b0d7b83b48b1e227693cfbcbbc02f3260039e1f62 -size 2826570 +oid sha256:ca29617576c78d7c0c04b31e1a697e489790bb1ab5a9a085f8aa151ffaa4ff4c +size 3297761 diff --git a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png index d54578f44b9e84a720ac192fc3e0cb0f904af519..442e8b2405cfa0c8f40da10e38698e2ebc49e132 100644 --- a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png +++ b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dc0b8b7a5c99a81b0f32ad7f38ff3c81265d72972f422a16b2733ecb8ddb891 -size 2103617 +oid sha256:369b9d51eb104ee2aab100d0168b6ec57a7b1c92fa9ae9af275c3adf878afb78 +size 1070415 diff --git a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png index e4efe604516bf61f2da91e64ac4c03f8634b82df..c00228caa39dce938f9e137c675ba7992b20adc6 100644 --- a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png +++ b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d9b79b6f004024fe8f53e9649d29b1a0e9221fdef9ff14108a5112c449189b0 -size 3169164 +oid sha256:3359d4a78d90926ad7070001ebfd6ffd04e7b0e494ace058533c05ba8f69f495 +size 2529488 diff --git 
a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png index c1d217649440da10b6dd25a7f71192209239a2bb..9a771403b5cd1e7da0d5620cf449b55dbb12f3c3 100644 --- a/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png +++ b/images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb648b97923bed60ab339ab1c645bf6e15baebee4a54d1bec226a2b22b6fcedf -size 3203248 +oid sha256:a61e0200b3c393fd649c3c03dbe6edae79c782d4f711fab06de3e25f6663453d +size 1209856 diff --git a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png index e59f348c5db4cb2eb47264d6bb1fb3edc3a55197..eea74286c373003a2150929bbd144ab7e00b8105 100644 --- a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png +++ b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44f03a31beaf149c9c2e550271e72f3fed2cd7120cc1600496fa025181c8fca9 -size 1219688 +oid sha256:95fdc1e1710a4befb1e5cbb228c9d0aa821eca198a13bdf4a84ae3aadff61e12 +size 176806 diff --git a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png index 18b7c9b2a27c5724b9184f850962d0526832302e..b5342b849e65786f3c537484f5aad2ecf8496ed5 100644 --- a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png +++ b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d00614867e976f528ff085b2c80f9a53d2733c61f3ff4befdc6c78d9c37c15b0 -size 342435 +oid sha256:74b7f18e2701cf78221430ceb1a9435c76662dce70db1ae6c42ee52ca892a2d5 +size 114992 diff --git a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png index 832b2c57b38611a5e07be5ac3cd63f8da83be090..c795ee240b780d3fdce29bf2d1c6ac61715f31f9 100644 --- a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png +++ b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13f2c63505e28b1ce20de45549c5a3fdf55eee37d0bf61f9d08ec6ffd5796dd4 -size 333169 +oid sha256:1fc83f475eb66907e80783b2fe4df28657179e2c892723d5a7f87ece63a78e6e +size 339697 diff --git a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png index 63ac7f9deedbb2ccf822712602c5af1777d7936d..8883003a688d7445ff5d1b2b17ecb92713a9750c 100644 --- a/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png +++ b/images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61338116b0d55e142d37543646f27d963a95e50ad0b0c40160c35f979d0a643b -size 339080 +oid sha256:d80608d3615f8d61b1bf28b9927519191cefd9fad55ced4ba4e8963bf15ee00c +size 357687 diff --git 
a/images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png b/images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png index 1d2bdec63e04056b7e4b5abd7bd2b20e453224d6..27f9597a44b45413447cb34ee8337efaac34e1e4 100644 --- a/images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png +++ b/images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3cf9e97e418323e72b3b3997e924003abe459e67ace49ec0fa9f17c3e31261e -size 390159 +oid sha256:b5f88a3f7a5bfeb802e084cd17c63cd735862da249b8a7c49c657610f220c3ff +size 571862 diff --git a/images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png b/images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png index 86b068bf4652b611b61f07030e9811d35cbd3804..840026ea2c69053169597b428285fba3562b5b8f 100644 --- a/images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png +++ b/images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78f6dba3adfb47093a7ce8de0a4a5d5fadffbb6d74ec67f7049bb8ab7623c418 -size 369213 +oid sha256:5cc7d75761587b6a8fa609af1931e4871ecc26e1b53d1ec11cda1243dc83b077 +size 379316 diff --git a/images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png b/images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png index 86105afa659ee52f4e385c572374b4b8179a0ff0..a6c282e8e189aeb9032034ffe5a2e57a18572b84 100644 --- a/images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png +++ b/images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c11605099ecd4d438f7970ab3f04efa7224bae6a45ff2331494a6663dff52f0e -size 762541 +oid sha256:4832ecb7b9248397d6d7e5902b9057d86689954ec3ac8a3684660c8b36742353 +size 949272 diff --git a/images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png b/images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png index 74b9ab0dddd68ce14bb163952d118c222b0a86cb..41bbf1df33e61a918dca796ab1250b1d4e7bae53 100644 --- a/images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png +++ b/images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ad6f6fcd8a11e8b0387fb741f257e215380217485d89590d89b4422832bdb5e -size 2549951 +oid sha256:a48be50d4c447e7838a669e1a23c1be63735cda1de4aa01303f0e63dae2c462d +size 1361018 diff --git a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png index 73c5ba7378bf18ad164fb2a16167c679de224bf1..aa4e25f9d3f1e04e2589e3b7a6c4f9d1c8202648 100644 --- a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png +++ b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1860386ff11d248d452e3002b9a26892178c562c8514e5c8b569418d7d977419 -size 1686962 +oid sha256:01bc1d794ac44234981b25adc4456c5f39e194c098111f8fab4f96a0e2548f07 +size 1347021 diff --git 
a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png index 6f9c1da64951bd8dde7be7227b0dee9c1a9c23a7..dd23c9c84940738b3979b9bad76d93d00130d249 100644 --- a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png +++ b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9730d15a7701b39dd24272d8b4317425148bf6f28e5a7df97be79083e882ed17 -size 1340261 +oid sha256:b43253ff4e2bf9b3279b6be48337637b3b57b2ee01d3755a8e05507dd383d582 +size 1340440 diff --git a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png index 1fddfcb96a072d4e9e718549b3df70e8807e416f..7cf64648ab0d16a93048512954333fce47d77d1d 100644 --- a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png +++ b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfee59fdde217e9f07d87da2c68662bf3f281db84c2110edc808060a26a764e9 -size 1606935 +oid sha256:e0306f675272f789b0391ffbb74bb061158795e908fe091514d4e9efccccbf6d +size 1299892 diff --git a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png index a5b3e8279857bd295c6eac35afefebb9a174b803..a6cc3c43b38d4cf37d8d2d0f36fd1d3df9084e7f 100644 --- a/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png +++ b/images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02323cde21119a787fc4374a6678a6e7be906b38995a1cbfe5eb8d81faedadb6 -size 1422462 +oid sha256:1497ac6135d3d7a74afdf030b784c0fb374263b60a50110abcbb95809bb3c2b4 +size 1422062 diff --git a/images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png b/images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png index 6c613bccbfb50c33bffcd0ac5547a9fc0279ee88..7bb908dfac3d803d721f02de7d99d3e40a4ecbf2 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fe3741e1114a0d489a185c0c36b2892a970f446693774f9168c46aa75d85961 -size 195952 +oid sha256:5c3ae23db6b79ff624aa36d222e09d3530d593ad4220ae7070597362356418b4 +size 170653 diff --git a/images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png b/images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png index fd7017ff3ac8764b032f71eb1d65491a89cd8e84..769fc537007b0c56964570ec11121eca7c11ca6e 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d9fbefe18f9e0d19a083069c0505c53d045c50a0cb09204995237d632045e94 -size 714134 +oid sha256:5d1978d527676158f70b7371394332e87bdb08ae100cf4feed21e366645091f7 +size 517984 diff --git 
a/images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png b/images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png index 593a94dab224aadd4abbdeb686974d401bd481cf..6e01ab65b7a72897d58e3cdce10139cd2f2f0c38 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e35279b324032db905392d11f2ef14aadbc4e681c66cb995a45820ecfd35db3 -size 483515 +oid sha256:06d17aeb2563367efc56de13ddd85130d414f42e56eda1af929e70d61ced9d91 +size 385166 diff --git a/images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png b/images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png index ae312715a02e95f4140095340b7f15fca6c362fa..506992801cd8cc5dcad0b057fe8857053392e45a 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00d09ab40b9969050ff7aa04bfc2e59dc2bf3863c789dd89b5267f6fa02638ac -size 786736 +oid sha256:03b07fa5dfb236977c1b5a7fa71391fde251cbab5e9cedd684ec2492063b9eb6 +size 550383 diff --git a/images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png b/images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png index 0a5c4603e377f57b9ab5f822151d070066fd1023..a5043871f3361568da5b8121a2df864a38929e18 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:399815490ccba056826acaaf96dc10587903ea2c98bf35d819be574a715c886b -size 603745 +oid sha256:5a31a066405c1a09052eee5eefadb59a5a726b6ba9a6678e0cc64be9dc5c8037 +size 617276 diff --git a/images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png b/images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png index 92897cf7c5cece82d0152e440aeb53c5713aa992..72bea5f83ac14720ec65e29b500101cb78a2a75b 100644 --- a/images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png +++ b/images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8099f4eab235fbfa1c24e933c9ebb079bc2ed4556b150e435cb9efc7a0284e9a -size 1179992 +oid sha256:40ae8bb7323dbc8060c2fe893e0f3812917182a78ed3255432b1410ae5cc7fb0 +size 1179604 diff --git a/images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png b/images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png index 265dae28fa3d1627047832f8e0229e0489bc58fd..d2fb6d402f2f637f5f60925fea7f26356e0a5729 100644 --- a/images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png +++ b/images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afea40d21a7111dfb7647b055462da8e890584e888ea860e4cca123eb40b4cc1 -size 852993 +oid sha256:18e3871ca8580fbea4440d82f188cff5473fcfdba953f4fb1e777f0df48bf509 +size 533919 diff --git 
a/images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png b/images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png index d41e114c2454fc12382f813191296c3926c2fc0d..3ff9f121e2978b7d82b20ed71a6d9f53413dd865 100644 --- a/images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png +++ b/images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8014166f0bbcda262f89d9381f0105be1b7c2e192a93942302219321ddeb3202 -size 522113 +oid sha256:ad6ddfbeea1bbc6f167b88844e6fc3e12c7e1eb0e0939caefccf819aa163a2b1 +size 593871 diff --git a/images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png b/images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png index f7a476c3873355b29ed0a60d835a87cbabe4b23a..ed4cc42728f4b76860cfc131ab8b690caea127ef 100644 --- a/images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png +++ b/images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2f000dfee7c54be4287264220b724ea7e28261a5fcfa70295e409457791140d -size 1434929 +oid sha256:42e4c3ca523470a3d662360b77976410d92f8e1e40e21458bb0658f1f885064c +size 1729069 diff --git a/images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png b/images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png index 811cc1ab447c927bf02031125eff66f433a29d96..ce1f74720c4e08eb03798a9f8f342654c119f472 100644 --- a/images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png +++ b/images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fedc19f5610cebe70616b6406889a40d3168a28ad6860102b45349de2d66716b -size 772375 +oid sha256:58043c3e4b94ec78171fd76d7fb876ff57619642060f99856e1576847aa2704f +size 629969 diff --git a/images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png b/images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png index b96c6e1040a86c3966d8c0a80cd071cbdf85f4eb..bd58e4e508aa0a735e0e739e32448e2bcbaed17c 100644 --- a/images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png +++ b/images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58802404f5afb9ee0efc1ddd2d4fe08047ca9c3b4f32133f94469bab92d8c03f -size 593673 +oid sha256:7c23f28e966d420be456777de39095d5b40c3519e021ff3ed1f4238b74cb1703 +size 592161 diff --git a/images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png b/images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png index 497198bdd1bb455f03b3aa3af7179ec9b7a4bada..5b64f623a916092e0f9af0b042be90d83a29ab4c 100644 --- a/images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png +++ b/images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8ff9eb12fce7ed2fcdc29dc95f13284c29c9858fa76c370919d3189331cc8e9 -size 855865 +oid sha256:4593a092a94cb33011d26119f1ad4f39b8ee98edbea2d8290fccd19a2eecc25a +size 769891 diff --git 
a/images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png index 1ed5d796ed48fc1d1a4fd4e4ff6eb24320522358..f68ff17e98026390c4c9d9bc864e75b361b7a919 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb3742a4b00a7bcc985c77339c421401132682b7d88c33ae02ee9bde7b1ddea1 -size 342722 +oid sha256:2b980a5a064cff34f182cc66eaa5b26f8c5713d2cebd659ff34c01f104c4a1e2 +size 227883 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png index 4b4746ddfe1aaf3969b2e9017d524ce2e83cd288..1d5384c8346541eae37d3cccfadedb340bdc3252 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0656d5bbc290a0f8889ca5d653530578422acd189c2039ecee1c39498b5c4101 -size 2712120 +oid sha256:82c4aec121c6cce8fae0273551012c6edf1938417fb9e575ce0af6d3c2c88ece +size 2548556 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png index 00110ff5ce591c50b0580f46d98d3d9fee23e014..c15ec785461d22b6d9b157c168942c2f11c4cf3e 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:964fa61ee4247c84d2e9f4bb5b537c6fdacb6eb8ab0dcf8576cdf0d2f6664d56 -size 734761 +oid sha256:4850ee184807203973a25477a5ab967c29c4c22bdd239506eb09ca12d9e7d983 +size 557616 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png index 39c8efdd30b80fd8f24d56e95b076082d0e105a1..76e64098dd386a98526dd2c983ed76a0f33c5f1b 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59a5a8fb466852af60b57933704fe183ce1e917e2c42de9f5ae0f2e5fd7bdd2d -size 1419640 +oid sha256:40ab7323cd5291ce59f43213e42a33ebe941425a8c1068e739045cfc6f918249 +size 1693685 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png index 6d63f35288b0b97d2a5992f23eb380450bdf1cb7..b686c32f9ebb48d796ad5ff27c0ef30f5e3cf7fd 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d154c29fb9ba423f3200b164299ffde2b884afb243343fcabb14ba527b12a57 -size 1341959 +oid sha256:f8fe12f8816a6b9f72d621e602e6f1f6f7ee862935e346ead1a91eefda7fddb7 +size 1029053 diff --git 
a/images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png index b3a9aec0a85a533616859c17f010b2aad97e82a4..f4e54e9fc7bb82424a9163cb7fd10b3b621351d2 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:052e44c1607c3aa5586ec6551a376da5f8ecec8bf12bc8aae3b86ea9ffd0a965 -size 2042942 +oid sha256:4be0c28f0bda5cad49926228f8327162b7c04ca2fc7d65940ee7e146663256d2 +size 1390916 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png index d97468bf26f78a4d87c3f3ed6cee799bc0de3d75..18428f81b26916c4c94bcb077dff9bb3db232730 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:229354d9d08678de9da2156f22a47d729dcf6421bc84e345414337d4e046f463 -size 1552001 +oid sha256:61f6ec1e32a3c1630b9b52f91e3d8b2d0c90140299ad716792ef7f1c8c06d680 +size 1526538 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png index b547d01ea8cf3eebdcc301ad062cbf3ef4d1bbe1..8211e4816a74b50f926a6a1fc90a890b94f5b1dc 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83a4d7b0f2477345702cb3f1584ac74a84a93599f94aa8bb9afe61eb221fc2e3 -size 734973 +oid sha256:fd80447594dd78899359781c4729a2e80f66c18a2f4f31c847fc75bed27f4742 +size 568418 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png index f4e3e8ad31456b55377e9dc44bb09284eec9f390..9e41954d9929a0b05b95e26b0344bdfa7b73a87e 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9b9fdae5eb420333b2250a6b1e444f00ca6e46cec24f4a18264c36a04e5ac24 -size 1314694 +oid sha256:ba02602c3fb90da99567fbd6984d0486ee42a2efa9c52372548a7325e29af23d +size 1522989 diff --git a/images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png b/images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png index 5919d2818337cfae9deed9baef8788f40679355b..a96807da1af8893a455d4719cc3ad3ab9301b37f 100644 --- a/images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png +++ b/images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a1171cb518a785477c2c13fdb37f9eb0c945b8045bea4ab32cdb80d910cc455 -size 1345036 +oid sha256:9614642fd9df2d50be5102f99b707a6d5a11f63eb96476a3ca932821b3b0de4e +size 1387069 diff --git 
a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png index 652bc0acaf828bf3f8dead508513c0473e676fc3..5d3d5570ab1c8fb411e83daea5b5255b38b0bbde 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97fea752e2688ce683a93811f51aa2c35ee55c74735c43d216492f2604a02bb3 -size 964331 +oid sha256:235633ade44388b9a422fbf417b3ac876d0f0d0e06cacb5b83dd3d37707de287 +size 881456 diff --git a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png index e68f51abe93c0241e0ceff61a3978137646a7fb3..baa516c19442b44847fdd9a74cc5adaf44103dc4 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a03f9a500e6d81d7b46287fadb0e62785c4456a947c9de778ed05dc5ed0d633d -size 972533 +oid sha256:615798bc9637c509780670ef9aa63cb68f37f5c8ec022a569708d9a0bab050ba +size 506651 diff --git a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png index 95d7e97916235d447de7d81800d43c56291e7aec..43e9523cf0240c2fa0642bc64c9139bae4ad337d 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e9196d470f19092fd771e3d13d517284ad86104eb96a82db83e6f0551e1ae09 -size 465950 +oid sha256:27b974f6bfdb157b5cbfc6ea77848dba14d436011ef65c2c49e0b7b739b8afaf +size 530482 diff --git a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png index bd7fa16676c23c3ce9630016408756823a6958f4..f8f3023672842f96fe60dc3b178bc8f86d84552f 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c45852f38bdd49107199e9e14f28a02d7e8860b0991eb087af4f9ff9bf32eb5 -size 1014198 +oid sha256:e9c0f5643440f06dc9b9d4fa27070e46a94dfb6a97d02771b7b56598991b843b +size 1008806 diff --git a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png index a66e000cb5fb754b87c837fb12ea66e048952e0f..659a23aa8e302ee315e44090b1b622299aabc2a1 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1528a122257834ea383767a39cf482205fe18aa3431d74a068a012d809fa980c -size 824484 +oid sha256:0209568fd2297f2e2e85f050635bd331875de7c2fdea6e44953435e92f36ad55 +size 868861 diff --git 
a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png index 5c7051ab507fbde57bf23663873aa3665982b7b5..a808395c7028bb153fc3608aa3195c73ba8460ae 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5f9d1a1933fae0af84e9ba77a5c45586c6b186a3da268ce9178ef68f6a10fb1 -size 801666 +oid sha256:208c8426ad18aeb8038db1017d07199d02a818383d5149e9d2e0b7514a4c95da +size 907001 diff --git a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png index 8ae9595d8df340b4ea124c990950925467e468f5..76cb72d7eacffa58db280033048cbc1bf877918c 100644 --- a/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png +++ b/images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb055e738178dd0fad93922fc5c16325f9e1a545e13dc69d0ecc9a8a84500fc2 -size 840304 +oid sha256:176478ee1017e92e6d99497e19ef70b5fab47cdd34fa7ef0225ec51462d71fdf +size 936541 diff --git a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png index 2732b71e4723169ba2828671e0101d78add6836f..56e2b227d721a353a07ba094f1ea926756645184 100644 --- a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png +++ b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67407f567e12bc79f635424933991728effc842bfbdcde472551626eca0880f8 -size 850474 +oid sha256:0b3ba8728dc93aa1be39a9b341c425512f0d9091d1991618d8abd44468cde5c9 +size 1022993 diff --git a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png index fe17ca4720428e60967bad2d2591fa5e414b1313..82f965c21dc50078363d6b77a9296fac29c2dfd0 100644 --- a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png +++ b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19f689fc381807f599818e8ae903ec78ebf4bc546c512c0fc6270e53f4e5ba9e -size 1175660 +oid sha256:673c8c5c8907d046c250caeeaeb830d20df06533f64864e3744288cdedd5c11b +size 830381 diff --git a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png index e2316fde5fb2fd300109ee70e16de5717c036bd1..9540e0560260fcc25684ffc45e5fb5e8be1bd0a1 100644 --- a/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png +++ b/images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a644c7041f49d81313471704e5fa05a270ea8c2a5175a6708226caa9526492ad -size 1172383 +oid sha256:a2c9b868e2ee9ccf0bb6a852fbce3370284a5249d60cd346e3e2a6102c7df69c +size 1277391 diff --git 
a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png index b94473af6594c268ea6d1b4ee55af28ac06869cf..2ea4113c36f5aa7bface58cf87dc5c46353e5deb 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92bc738e0133b142009a4d6161ebf1bba115c62b3a395ade2111b0da2419fdb9 -size 1179247 +oid sha256:cabbdeec29b8e9f073c8cffbc3aee2d654bab5070de90d851f0cb8b45c422534 +size 847679 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png index 2180e13c207c76c6b3c13bec2cf5c4699573c02e..f5da2863129b12b4941f99d7d6daac646cb4a9e7 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e51f99e8b91e3fd0490a9b45086ae6cc8475718172e03183a9a039501ea0c35e -size 686580 +oid sha256:3972dee87e49257c100f27c2c6f1369ec6945791d8e251bdea2153b9fbac8200 +size 804895 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png index 466a707f4a50b8961c838d939894d380e4e714a3..dfcd325a02bfbb9a70a448be9afb0612b78ad968 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3616de567040c021df6df68a193da68177aa1c4b8f1cdab499da23a402dccd8b -size 1343630 +oid sha256:34c1902ec94ffd01257f51f49d5060498d099ec842207a12dae7c365375889fa +size 343817 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png index 3cf1f2142ca4cc0b0c39aa6db4baa01ef08f8591..d1a5ddd828e4cf1f9318dee860cfcc94daa9fe86 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0a459579094eaa4d7e0bd6db4d247d676fd308dc33f1f949e16063872992be8 -size 1098967 +oid sha256:b9fa8305334f339ff2dc826ab22a7e7ed6a4bacf0e4affac15b712080175aeb5 +size 739727 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png index 0b21c60a628f0f6549ab8ec0651a54e9caf52a57..04e3018adc7f93ce9eb799b383e8a7062e4ebe45 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dedb57824615d27a06a49afe0a43d798d6f5032b9409210855ae0b9fb9a42a7a -size 1522601 +oid sha256:7e7bdb22a1577360ce8287ca5c54d951949aeb4003612b11013928d61aee6fed +size 523925 diff --git 
a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png index e1e31d05bf0c9f264795109560ab9d8ec6fc810e..678d4425a7670d4f1027cc9249a510761d59b19e 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be76d710c596d4c2bb480c4b82558929c3e579328903e00974d1e94524ec5c30 -size 646731 +oid sha256:b5e8521970df8bb50c3896f6650d15e6d415ee8a830dea898e168a19558bdaff +size 765824 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png index 4278c3c6b868e14b386407361c6ca5ffbb03fd6c..38f1638b1ada49152852843b3980c1d29937d6d3 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c426fca0b161e7ce8895414c010876af2c1335e6eba1d753e979fe22dbf673a -size 1096462 +oid sha256:e659a3f0589dc2a53c545012d2f0f305ec385bc2a660fdc311768f6f43e9c3dc +size 1081543 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png index 0176a54497686cc6d5708aca0eeab640f783becc..5551ec43193de744be80efbc56e12547ea1be2d3 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f17a0531cd7735ad9e188e0e5e00d251aafff2bdbb2ce4f7b3de81aa0260ab2a -size 1205053 +oid sha256:61d97c86d14033f449acfe043ebcb3d54f2c268b7362568be0aebf20b8d69004 +size 873472 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png index 686b5fc809f969a0320baa317f85299249d73c3d..c530f53fa18192adf355da914cfc503afd2b8781 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e832bc2ce594b1b965025dfcf183384092b4478a1ca1e344aac438f91c62ce2 -size 1162982 +oid sha256:21d71063d4299211d6ff23fb0bb5a47cf6311a64381b85f85101b2400e549aee +size 475134 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png index 22bbf83f4ae68693c68065761efb06d1b2c57f70..a6d4304965f83306bc772c11887b1e81377c27d1 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7db8f888b0ede1a3e804b3ed1c1a8f18235a8bc6e497c90dfff8723beadd13f1 -size 654342 +oid sha256:325c704708460c015306ae967e9cdf0e18c994b27949edd307ff4ae5c637eb2b +size 624538 diff --git 
a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png index e9f326d870d9976ecff79b644fac5f3213530dad..f26440a98acc0528aab20ebeda4d05067dd12308 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e11616b6f1e57981028c56344159e84a9802db773b8e92b532c70b38c770629 -size 1152562 +oid sha256:1bb6d2a38ee1bd7aa7fdd856365d82ed98c3bdd3a676c822ab2dbbf2aef4ba16 +size 1052903 diff --git a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png index 3e852cf9e1adae68c7b1a89d497c571105ad8577..eaa3b9eff37c2537fcb6caa18a185181d3d570da 100644 --- a/images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png +++ b/images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b9641874292b50e4a88cdc8f03670c86963de08ca6f93cd242e83948581c384 -size 1148841 +oid sha256:9c5f384275e52f4724931f36552a3be589104318f01661117f4d290e2f4da108 +size 1267377 diff --git a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png index 067a7ac81e6f41b5c7fd58ab55060e15a49641e3..9ddf74f7d01e2b7ba67db648175a855cc6f38daf 100644 --- a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png +++ b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:891d9a2a8fdd82b90d52a2a92b3d8b4aa51479a014a6e2ff198c876b83f50d44 -size 1741676 +oid sha256:7ff3fa6cfac0f8f0d8106783be6f2396b46f350046cc7c8d27ec343ba8dea6ae +size 1325716 diff --git a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png index 45073e543881e0ad362a3d26c3a2a3dfeac96669..51763a1aa52337df1890ffc745b62d7e3bcab931 100644 --- a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png +++ b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41ce36aba560464006bf289ecf11a3e857cf4d2ff73d4579fb875aa3cb235b06 -size 1537596 +oid sha256:a423bd4eaf2ee43466d86658bd862876b1650a55091242b6a61503606d091acd +size 1388591 diff --git a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png index dda238fdebcc935be598cbc1a2bad02af6201b16..f0496a88e4c003c2f0d3a0bcfa387cbf7fc7d6fb 100644 --- a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png +++ b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27772b19e96a3a21a864f4382d9da64c845a402094f396b1dcc094a4117eaba8 -size 1119277 +oid sha256:dd2227f1924a8645169935b6348e824a88c84b65fd7ddb75227cc940ff7d60eb +size 1128777 diff --git 
a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png index 067a7ac81e6f41b5c7fd58ab55060e15a49641e3..212a2a68a694e592199793e2001817a8319489cf 100644 --- a/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png +++ b/images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:891d9a2a8fdd82b90d52a2a92b3d8b4aa51479a014a6e2ff198c876b83f50d44 -size 1741676 +oid sha256:d6e897bc469cebf4692ad75effdb36f0d03418ffe272136127c8d389e9f3b7b3 +size 1805922 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png index 4ed3d946ca8d7b5fc6dde4bff57031e02afce394..c1747884fdb964996bba94677ea7f7abbf91e6b1 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:595bfe34556a9c753da0377763488eeb056fcefbb8ba14d94843ad19669d0070 -size 1442611 +oid sha256:8576c172c926fff2bd299e4b8f7a81e7f65f90870a6c3f03a2f39117057f6987 +size 1694676 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png index 1a07853a889a00674ea68a6e80edd596670465a3..d361ca1c5aa61ae1049ab7bc69fe0fb48e8fe249 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb8a6f8d2ea0a4ca63ed5c046d88b6d664a2c4d03fbe3bba4b7eb134c4417575 -size 883031 +oid sha256:ee2e630393464ebd57c49d7c3164e8764dfafd256a6618dec9b47dfd62dda53a +size 1263797 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png index 73648cf0a8b9b18d273dc2a6764668a2ec0b7c3d..aebf84640acee75cf40789096d8e3d1e4129c9de 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b65e1684f749604e94e3cdde37cfd464c8b47f676a0819a8212c44e8c77df2ba -size 1531817 +oid sha256:abd02929e366ea3d159085a9992283b861cd81ed7613ca6a92f8c0841497b2b4 +size 2672620 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png index c2ebee293daf03fbdda7440d9af94fad6e74e994..c17567ebef6290e6be9afb600907e84a8c7f7b16 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edfaad2e952033703dfe84dbdf7bd748ab5d7a8084849a9e16dc74d537ba75e9 -size 1896312 +oid sha256:645f6f883e824c9693d9884ac0c731c1ba46d513bfcbb7e2833a4d460faee6f1 +size 2179951 diff --git 
a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png index e6aa4ed7c3393b38d3aae4f2bd65a0502d33eed9..7ce8b8f0ed598de9c5dc1b3a864700871d2081f4 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19c3b6d297b7973100b28dc5df60f1042f603e5ab961d85e740352058fbf28c3 -size 1632536 +oid sha256:ea236d24eaf8c7cfdf4bcc0e1d77d39a6bee1a3ac25bf91634754b7541cb825f +size 1342290 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png index 110fb3747bd96037dcabf95a45452e69bc46a66b..83160c3deb1c5c9f7c4f66ef910f702e1613f220 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20aac09b2da8e1f4d7112565087691aaa2f7d26e12d80a66dca4e2a751cb8b34 -size 1883666 +oid sha256:9564a95203320a6d94f38b713b29def1120e425c1379b4c92b22a141a2ee71e9 +size 2654753 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png index 3a907f13b325f80e2fe19d3c824ef639dca24a35..871a53e62368445c2157df355ffa99c487a69018 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb1ad5e1d5399fecf49382d24cc6e1c82444fb405986d72b60ab6e291f6ef842 -size 844919 +oid sha256:268a615225e3ba3052089efc6d3c6e400878c9327bca88785ebe4221e42dce17 +size 882119 diff --git a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png index cb4d246868b8184839154aea0e4102a97f598cb9..0773fd20b5e95c670cae9369ad11a1b854d2d114 100644 --- a/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png +++ b/images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c418c883155fa30420ce903196293612a7435c1675f0f17c344c021313cbba44 -size 844709 +oid sha256:c3430cdc8c8f21798ec8b85abd146c33067d7c61d2cf636a44d550e777b85bbe +size 956661 diff --git a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png index 6cf9b9f7ce84d0481b42c602c0466fab0d4ceebd..c2d0d0953008df6da0871f72544b51e8f9c7179e 100644 --- a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png +++ b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5304d552f86367b2a1098654140f73a8f23b1b9a6f22adbd962a7909826679a8 -size 933110 +oid sha256:ceb32ca62e48c548890303d0c94f799738af0c91c46257121f8947648f8f4c23 +size 926357 diff --git 
a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png index bdd7745b3f13c67370df3c2e5589fdba79a66c22..acf20f25de5ec6133c0464d60905549aa1ac0fc6 100644 --- a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png +++ b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b914dc3a7071fdd24dd33e5f8fe83ac3d2ea88df9ebe878e074004425d4e22f9 -size 1154287 +oid sha256:dcad88cb1d3d5179431dcbf276afa38a58c44089e2c3f88a06fa4b6c0bb4387c +size 1315841 diff --git a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png index d3b9dcb732ae5d823aff4734fc1c8d34d70d3e4a..e7ea9e059877eb407f0cb70fa77ea9e685ac51d9 100644 --- a/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png +++ b/images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bd0dfd25315bf58b8552b597d0e991416356a5194ecb9cc4d41a4f9c9212d0c -size 1151581 +oid sha256:30398dc93d33b1c7a81a6c541a50cbb89c6ab56641f079a33454a558f90fb097 +size 784141 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png index a4540637bb5fed683c261b2ef6b5d098f148412b..ef5ca408519555454aaa910989eaca1cf9a2d8c7 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb9a57f0cd0c5d39b2e3e87aeae26bc65939d3765e33cd6a2817c360f9029b4f -size 2428420 +oid sha256:9a0c70fa88e9d4013771b1c4b21558152463ff4581b5628004b2d2058102240a +size 2323899 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png index bdcab5e2e16668d89a420f1071bf9e458d6ef656..61948faf277fbe7969f859f06e077aedbb5d825c 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3221256f560cfe53d3b6a1654cb00baec5705f732cc129d791f6c47643434122 -size 1090302 +oid sha256:6444c48f13b0756b22c3c9bb2fffedbdf3532bda219cc5d9842bc449193670f5 +size 732704 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png index 00a22f22cf43026485915e5f915ad2dea61964f0..20d544ffa6e187d97245b86095fe22a2440b34ff 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cb4372ee358bc2dea92d315ff2367a6bcc549b0045bf15a1af996366852f80d -size 444003 +oid sha256:95af9c16e177473d76d59900364383f6131b2daab658ed7d1d815392b2346c2c +size 356944 diff --git 
a/images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png index 51d20ba2b1464e4f2006e61f1580bf02ccbf56ca..9eaca58df6593d1fc8405fa0d8099c48a6ea93c0 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c89f193db88f88dcd47e324ca8b7e138851d5d2afbdfae3039d9d00c0f1670db -size 2317069 +oid sha256:0252cef599d1a746d8a4cebc9b06d98a269d00776bcc15a25ca907dd2ac67de7 +size 2250658 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png index 6f9d967931d23f67cd0175220d1012d722401d88..eb2097c359d9f64de57671ec882ee56c7a22173a 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50be5bc1ae1458bb80b02864eb2cce1d5cddbc097cb27364c5bbe398483cfe1 -size 2416439 +oid sha256:994e65d369affb41032022dd242c1a1c4fbdb714d6c7dfc77796afd025304169 +size 1140609 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png index eaa2aa48a31ceb368cb81802f7d63ecbc6209be6..e052d672e084fb16808dc9ed680c253cbd48aa56 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a30690dba8a801d74355455bd69840e042fe782c2357fc82c118f4c623f9527 -size 2520757 +oid sha256:846f6b4f72d015e7c039385080f364a1e449b920be09f92512924f2821cfd2f7 +size 1264929 diff --git a/images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png b/images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png index 00a22f22cf43026485915e5f915ad2dea61964f0..a719b6d50aa7632a0ec246009d246b5e35261056 100644 --- a/images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png +++ b/images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cb4372ee358bc2dea92d315ff2367a6bcc549b0045bf15a1af996366852f80d -size 444003 +oid sha256:3f5a1d962bc4b6953a54398ee2cefafa75c46cc484b42588ad4effb973e6334c +size 378322 diff --git a/images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png index 7d4fc6723f36f2ecf943c468b6de5fbd5db74c35..131efd4498957131071a98726ce9e7b41954812e 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb7413fc932a18536405411fe3a4c8c832564e0243b5c8dd5fa77e1eb40f97bb -size 937293 +oid sha256:6858eef5bfdcea9d646631dfb5f1ef4897ff12a3f6b8a563b28617f9654b001e +size 938453 diff --git 
a/images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png index 3b0674109c60a9cb096f3e68d7b130816183f7ae..06b1a5b53ebb182d4bdc93cccd0db79870966d1b 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59f6b8e758fe851c29d54e78000d8b4cef793f7a82881d789024e713a506ea34 -size 1738411 +oid sha256:246f35afae6631df23ed853b98a7b271ae8340a31e60294cde5566f462a48c4c +size 1390293 diff --git a/images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png index 377465383f5cc5f251277665268a0c1eb65455e8..8811c0f721173b61a9d121eeb95b0b1cc902249e 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e00927972e35dae7229044366da76f35ba8c25d3110faf135f42b0b7bd3aba0b -size 1571089 +oid sha256:39d7934f4efb4b53e3691ca116156460d97ca80722318179bbd30d80232341a4 +size 1805026 diff --git a/images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png index dd2117c680c656d42dc070ae77f248558cfb8a4d..4aa599feac027747a4028cfef28a95fa41979e27 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b614ff6cb68ff29e8cf205cfd08e4f223c0adfdfd0979bf10ec49a50632abac4 -size 822132 +oid sha256:420991d7edde6160b8fb0a769e3ecd8569e3fa0dd513195cb2bb4b5b29fee49a +size 822643 diff --git a/images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png index ffb56b2bc01200e80b4d04ee45a750a4c2a64d6d..d89d31a34800caddaca903dda1068d298a8419d0 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dff4bc955a3f59c261e4d13815e72abc3742225896b6e98c185fbba01c98f9ef -size 592344 +oid sha256:9a524f4580667d950192687c88e47430880fddd03bc3d3bd246b6565ca9e5ffb +size 712941 diff --git a/images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png b/images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png index a0c336163bc6be7b8c7a87fbc856bf824a749ba7..4f22c342607f8c74a8cb026f7d5b538e6f62c25e 100644 --- a/images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png +++ b/images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f64c438770f8546bdf90fede3dd581f51114c4d1b4f7ac529964b0a1b2a18bd0 -size 576979 +oid sha256:cfeb3f5a80e68f2781ef7c61a9beb0084c80e6731e00e8773868b007f7965bf1 +size 728701 diff --git 
a/images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png b/images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png index ca4f98ef1cf40d2fdaabaf289e978fc36c72e615..55dfbcb5da43a609dc9876054204785e1f723aff 100644 --- a/images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png +++ b/images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78a432c7956dff2288c7cddbd4400ba62c5015d0d2b4ff0ebdf79196c4535938 -size 1112227 +oid sha256:8b189c89833d7bdb5006398ef5dfc4ad50ccbf784365192ec6be469e66d8eb25 +size 1127738 diff --git a/images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png b/images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png index 4b13440588b1f2578207473f94c79bc0d79ef4b7..a881f4c76768d58f3586cce03c22df6ee66748ee 100644 --- a/images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png +++ b/images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db9e3afde109c3edd72a91fe05c40f6a57fe0e4ee214407b6026e08afa87f412 -size 782176 +oid sha256:ada6ba3f6d1678bbe13f6f57d493d828477d9f6800a0578fe516cd7560579f5c +size 694857 diff --git a/images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png b/images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png index 5a6ac33f356f7a57c18f82aac6a1237b02277bdf..0afea083352d4e31e60fc088126ce92b30ce67ef 100644 --- a/images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png +++ b/images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d24c8956ac954dc2ce95725248107dfcae2ed9ce203fd4dea573c9ddc0f343b -size 1310949 +oid sha256:571e4d2732d650c8f44a46e75d04dd2a0388682860aca8e1a126b1882d31461c +size 1400586 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png index a92e5994f2b8fc9afaffe058dfd9189afed0bd21..7078fd93cc78092618f26e5c472fecb459e6b1c2 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf2541a230cfac3d1ced080ab2aebedc41c4fd53b9234c865c4da78f7b0df032 -size 1298317 +oid sha256:cbbda236731ab5fa9826c05f714641256aba8eeaf4c9a193ebaee8ded567f05b +size 1268443 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png index 0c683fa33caf5c83202afeff04dff9042648b353..5d0716f4019b8d8bd25b678a1b192d27a1fab507 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08b9545600be99efb79e79ab2592230091f7f49359bda0b3ac65f433ddf9428d -size 1159595 +oid sha256:53177ad15f0fda8bbc749fa1f00f85f99abbfd4be0ba57cae8aaebdeafc9a218 +size 891900 diff --git 
a/images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png index e7d97f55f3f632a349ce4940140a89ea31b06ada..5784346756d61469ad7c058d71547a6835c258b2 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8401cb4971573ab9e26cc6f87d4852389427e590a1af9aaaefb203d2039ad944 -size 1353989 +oid sha256:c146607854c8cdd8deef8233341e84ff2359398f60984f3c5c1ccb2b1b8e1e83 +size 1411392 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png index 81fdacab33576c6c1e25dd565e0d3e9d8229f105..be642da642c926e5e5de3f238815339cd63cd70a 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbd8082e6322bf0985583eb4e1a632a67505b9b2227ad3a6857eac71339b28fb -size 1353474 +oid sha256:080a0f2d815af3d69fb25cd0ef98c5f8b448b082e3cf20eb2cd38d05deb50e8d +size 1591294 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png index 2a7605ce886b90ce540db559fa7c634a93509dc5..f6fe821feff8fb5548e1077e8bb217a2c898dd33 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abaf579d88104053350f63d668e2e0c862f6782b2abd949b4d7fcbf3ad1898ac -size 1410819 +oid sha256:28e472fc6504befb48951a8dc5e645c4d5ca5ad5a623e75173f4892cd9bedce6 +size 1892262 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png index aa0c448c11c9a85ea9c79ef0c04d5e956b32b158..b95df7eaebd9a0e99faaa093cbd2a59e7d52f522 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8999aeffcb827cb3107e7a11a7880db803fb3596bbfc051bee30cceca578e88 -size 1248202 +oid sha256:b584a71beaa0429164676f9d82b59f3a8a9ab6120f8529ff28901fffc212551e +size 995764 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png index e864461b3e37d21f5609fc27d6ef1f3db2f29085..ee06b5f200e1b2ffdfac24a7a546f83d71ea3f8c 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9d42daadb2814f5f8a760c979fe4dd0f296dd06e0611540827acde07cba6bae -size 1198851 +oid sha256:b2f3e7ad4bfebd84068b6633aa9ee346c23e5684ccc19e8db48ff466a48d20fa +size 779874 diff --git 
a/images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png index 4e55bd766298207bd41ab7ad787b0a2146593221..3d8fcf78298e7caed2422fe1cad1d082f3bcb6ed 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6353cc0ae482d7fadf5084a22d1a2a955788033936aeb735bde2c13eb92ab0ff -size 1241534 +oid sha256:79395b9c59896a9d84d1651b85725329001b68180a4a4d814be1ea5ca3340b36 +size 1326862 diff --git a/images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png b/images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png index 0a4fd11e4daf9007bc2ff4b6f814b6b676b17790..1d102d87c552265690a842649c058a20edcf8780 100644 --- a/images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png +++ b/images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8bb9e173ede7b9a52132f9590503fedcccbd099e85d36bf289f3e42a329a892 -size 1147279 +oid sha256:5c8528bd4a200a8cbb7e3db76994a5debde3720502301a8108ed021552e1dcac +size 1059042 diff --git a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png index 1308c4fb80e4250c2758e2a4978b9cbf6f983d52..396fbac4b3a1fdffa57a170f0810ca44b70d01a7 100644 --- a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png +++ b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6e67fe44c2785ea30e83aec291806a281f80dc13b61d3cbd0798b466db68a75 -size 926930 +oid sha256:5757aa6706ab3c5638fba83fac8b0d70b25e1e0de55f8223a3527a7e274e132c +size 912351 diff --git a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png index 0937380496c54968dc352658c66c792a8d5e7b31..76e8c58baeeb89e95eae107fac046bd07574688e 100644 --- a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png +++ b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39f4d7d79d501e32ed39453ed3f36f58654b21b373e1a9fbbf56cf4af494287d -size 1298545 +oid sha256:a08459d95434d88b83747dbb0efe8ab39156458560f2d5326fa2fd01b9e05e5d +size 1500954 diff --git a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png index ae42b7851781f65c6f44d34bc00957e497cd9e0f..ae7f990e8974660aa5c744669593c196a63840db 100644 --- a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png +++ b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8265197c6ce831e89310e3274288d79cb92ea1680b74f9a16528dd15faa8db0 -size 1293209 +oid sha256:3c95b627cb1cb7cf5616dc34057c2ec1bd1606749e0c4c0c4fcc6ee7a8222a88 +size 567125 diff --git 
a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png index 1ccf98eb913c5285190362c1e629ec619ce3c136..4840f6fad4195c987f3a3eb83079d85211ffccc0 100644 --- a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png +++ b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74638e7c65b7d080b4190ed3cdbb628db7e9daa9a957606ffebeda0a2ba64d54 -size 960348 +oid sha256:5f1ab3780380ca9841f0603c6bc987ebe4722568bfac14ec294303ae08b48330 +size 799250 diff --git a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png index d479e2bdf7151710311b293dbded96db269d1834..09288407dc17cda8054fe6667dcd8bb59d6ad2c3 100644 --- a/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png +++ b/images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fa92cec40ffc52ef1d69cc02176408f6f3448e7d47fde6728e006bc77a75da5 -size 1293436 +oid sha256:a643449488ef56fe7f16f9325616d331af4f150650d16823bde865d66ab2e578 +size 998495 diff --git a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png index 9bfe99d9f556b50364b16bd9b8e2bf0d7a0a123c..be9b45c20254ba72fd0a25c63539c4afbc8fd788 100644 --- a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png +++ b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b926bcc63ec7373153f120f75419d7ee069f68a9358a3b2cdf5cd316e02194e6 -size 1048962 +oid sha256:51f0424bfe318db91904c89c2a5445267041eee839a1a20a15ba33beac38a962 +size 1129196 diff --git a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png index b3a5ebbe567467f11fc8e48ec43a4b7e19da3f3b..92fa841e62affb8d2437f6ee03b31d07f23cbe44 100644 --- a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png +++ b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4719c58f385f54a7ac6d33e8a7d653bc45cf2401dc033cb3e72b916f060c7626 -size 1113760 +oid sha256:5284315abe930abeb34cff35a6142c8c9f7580de0677ee848048cd1fdc097c7c +size 1017154 diff --git a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png index deef72d1546bf5fadc58f90427fde5947a8ff8ef..c2eff2291ceb2ebafec268182fa05610dbbba8ae 100644 --- a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png +++ b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bb7811402ea6953c4f7fecfa52e1513f15fa053e6ff9a8a7083076112fa3b44 -size 1254331 +oid sha256:aec44ecb7bc8644370574bf8102c0246ead04240edd75ad50b4bd1842e645f6e +size 1146743 diff --git 
a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png index 082b6a329973ef428386784878f6b6464a2dffdc..3af8880153a04430b4ce4fb1f4a7fd0e5933f319 100644 --- a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png +++ b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0ad8e2368ef64242fca9dad82ea0280f1bf70abdc29ffd3f2bc5440fcbbaf0f -size 1488466 +oid sha256:0e1ed0eb06bbeb05ebfb2514a616a6525484a2993e2370b4a56723b2feb92339 +size 504044 diff --git a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png index adb83e96ab1d8b7a9ead0a86f3c3657e559d3396..75b832249bf226f870bd4aaa4332f7e07a03b5f8 100644 --- a/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png +++ b/images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:241f5f8985ffd5b3a8f4680e428318c2faaeabb586655bd794311c3b663f6541 -size 599768 +oid sha256:2e76b5191ed158b470df5b23fa5e141fa2f72cd12cbb4167642c1298952f46e0 +size 229269 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png index 15e9e394df5b983e4d348cedecd48d2887ceaf23..6ebce2dbeb104498407af5bcba1967b18995c905 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4667c75c4c184d63973ce71abe3e31c6d1e2ea31f740ed52e69bf00d7ff617ac -size 1113935 +oid sha256:717da5cdbd4741c07298066b3c4d4dde5b71d3f6ddb0bd029fa09654d57edaf8 +size 801966 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png index 183c9001263db8c8f78b4519e4f8fb94d591a038..2b7b0f04fbfe2ed021f3ff03bc1a3ed60a6b22f1 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cad610b4927e93c66c77af6adec52ce2c3490ea3a7167213e433b739a33d333 -size 1365597 +oid sha256:b05f362bf636b7e84fdccfaebe45a0b8c01b272aaa3662657ba30407d12fbf3d +size 852783 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png index 15e9e394df5b983e4d348cedecd48d2887ceaf23..779ff218337bca8b9b0011702a3fec5b03d57cbd 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4667c75c4c184d63973ce71abe3e31c6d1e2ea31f740ed52e69bf00d7ff617ac -size 1113935 +oid sha256:1bdb93b239fcdf92c14a43515fcf10310aef3ddd285830006360f2ab1349e840 +size 1094050 diff --git 
a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png index 4bc80b4fffb23b1a0b0892f6b0787309dae1ced7..992e22de4f8de7df833e98522d8ff2ca5e847f88 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d64a1f2ebebc588887758f11e8b50a862f0cc88640ac954e9cccfea0067f62d5 -size 856469 +oid sha256:3b3cfe0e25b4bcdb5e61890d5c2fe472c3bf6fa5d379cac3a8841a4be1472dd1 +size 735551 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png index bf00a78c7ca74128d3293ca0847200d56b47f568..9bf49fa214f3efef155c9061b1a9d13aa51916a6 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d90bb6db9d536fdc21046f51f4ced69cd174f69c62a2ccb82d906b0d0763e7f -size 3666411 +oid sha256:effe73244c927b02c527a2715cc1f9ceaedb8038db5d71dbca36e28bd2a28e5a +size 951619 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png index 95d3f2bf1a72d4208126d9364e2574bd71d222da..3eb702b2c3e9fd99e2675ff744f3a245457dae6b 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:778107d2b45665db5fe3ebbbdcf94e028ad042afcf36c2f8042c925c78907507 -size 857816 +oid sha256:397acf95e579a7c6ca6b132c0a5d95753628384cb0971c1f4c1c8dfa3b65407a +size 821879 diff --git a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png index 019160ff8e553850ed96c0c1271970f2b62aaf48..b88617a7055324cb12085cbf74ed1394c8dceca2 100644 --- a/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png +++ b/images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0226a27afc340a6ce853e36b773890818f98d73f9582fbc793ce50edd72792f -size 1177836 +oid sha256:1b8b14f9e93117652a4bdaeb7e3caf3ee541dd2298ebf3d74dbe986d567e0cac +size 1107923 diff --git a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png index 6bbe0838216f91c51eaab847926f50a60491609b..2ca893c89d070c8df2b299de9838d7d96a5fb7b6 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9de6021ed696581d0955800420e95b5a03ccae4efaff35e286e4ada4040f337c -size 1815806 +oid sha256:d6f94c5c6e9837841a778fb7cb323de8584682631c81a5088cd13f53dfe01d81 +size 1525811 diff --git 
a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png index 67775952d3b6677fd35118aaf4286aa864cca71c..f6a0bdc9132ea62cc35e9d442f0f3447b450749d 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8d24391f0c3173b4b438012a8aee973c6c7573091408e2d49893a8740e3f4e5 -size 2535192 +oid sha256:b9380768dd73650b1b00ebde34829bb26b9a91c70dc78d706d2b913108f592aa +size 1876209 diff --git a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png index dc086157b7193d840424f6b1ebfad9f5480bb1e4..aeb1f69e3df94cef1afeadb327bfa3885188e873 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9392eb9dd9b344c1a85f8238ee7ceaa01b0db0949e7b36ae79e46c67c2047c40 -size 2524096 +oid sha256:4a339094ea0b5d2a581164e4ad0b7530bbc2e26229a7ed4c610f934ddf7c8334 +size 1882145 diff --git a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png index 1f3163561fc446cac2442ce1596c15d1e363fc58..56afabd8179dd2c6b6264e6b4532d91c46f36744 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abd84d41acc25da961f1e85e3e16dad92f36fbe79a6071443866b4fff2bc9e62 -size 1819079 +oid sha256:b911fa81831e6a15603dd3955a7f12f2ce6e3dcf554b92b442f123871787f568 +size 1497065 diff --git a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png index 9e2c6398d48ed53fff39d2e39a66584505ac2a86..27c4ddb40126d4b1491c45a43b4c22bd8dc61040 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:620b4fc8199322a7625f5fbdef835d0893f0eadc833e1bbd5b31933aa8226c17 -size 2519450 +oid sha256:75935a8b38f7b91a69155c7a649dd439a70bf5fcf03c9244e82bf7049e5ce795 +size 1482355 diff --git a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png index 19e106c836356cd1b673b4f26c3c6bacc81454bf..7a7357688aeb01612e59492777a2e708cbfc2300 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:177546d6c9b0104d1feb385a92a7b3c234b9579514b2771e3bb372620e22aefb -size 2448380 +oid sha256:57ddd8a39d24c7d459040a9c136907fbc5ed51002c53d5cf27a89c76ea64241a +size 2255395 diff --git 
a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png index b4e6a25f4c87e7dd37c71cabd51f55cae3df0de4..66775054094c7731fb5f3ef6fc4e6eaee7a9e993 100644 --- a/images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png +++ b/images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f58a0c94d709c00446e02a843a5fcb40e4c436ba7d3f4ab0c896371ae0ac16f6 -size 2525744 +oid sha256:304f22f4cfeaa70ab1356863257979212e3a5e729e523bce210a949b920388d1 +size 1964042 diff --git a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png index 29cba1743f11a7fc48a75dce4d40e62db62fe179..6abb3a871e98ad6234089a3b677e4b811b514611 100644 --- a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png +++ b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00569ea60b1527d083f0f721895b7a73af5373e97c275dc8f6dba287e4885ec5 -size 622558 +oid sha256:bcdaa0242b04b550b5b47d9c0e0a2122c8a8d1d70b57c076c87a1290fc816ac2 +size 756338 diff --git a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png index 5bcd5e1430183c46ab2346575b7d3d566fa1cc7f..7a90af7fb17efee402ad568ee4d1c60e2a894436 100644 --- a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png +++ b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bf1ce8e3157c522574371eb0ddc15f7d1f04df0f2fd0296b7f8b794c6d4ae6b -size 703686 +oid sha256:eed14055531ec8b0c9ebe8e94de40889bf8a0cf8e8d215ac16db29c2c3ad0cd3 +size 1642970 diff --git a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png index 17b01ea5d791ce5a0e99094fd1e9dc460dabdb72..f5021c5bf3c7bea3136ac24c0cf39b0970fc9afa 100644 --- a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png +++ b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c00b8029bd63d474179e09db1840c629a89c37c665b3a57266b94050fff722c8 -size 704047 +oid sha256:4bd61627315e05e001c9f109029ebec953d1da152a24bff45b4cd59d8c7b447b +size 1469521 diff --git a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png index 28fd70e491c0bf2252c29c30f5dc4a899973de3d..0d7b7349be19b54be87678d0b15e73ae40bbb0fc 100644 --- a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png +++ b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:909439079b7ca72f45146d9a0bddce19ee8f78c33199d6b30dbc776fc3c546bf -size 723548 +oid sha256:4e1d8a19ffcf80dbbb8fe28680723376a76981c3ad424e406c80b7f857d112a2 +size 625027 diff --git 
a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png index af162b43c78888d6f014a9a188d36c71fb41c22b..8608e5639866f5c653179615804092ac2f0e9cd6 100644 --- a/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png +++ b/images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b05a8644f1288d61045563e32512fb9804587b95d1f8637d9002406b5f1c81c8 -size 845257 +oid sha256:d9659a4438345056eaa23d7705f902138f5802af7cb5f918227bcbd289f9706c +size 825443 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png index 6fa7d95f216884c809ac15cebb0fd966da17eb5a..4050c336f40820fbe6ddc3390af6832a94deff54 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55ffce097d3e6839b6979b9b08f5b1568d6c50ffa3308524551f7863827b5881 -size 1061208 +oid sha256:0f991e77220091544b2aa2b833e5f0817b549328fe989fdfb9a006adbad14d30 +size 1035318 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png index a793a9328f5a3ebfe5659de22c75c2f7c3652fc5..c93cd90b61c87b3c974551f63503ef331d6d155a 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94250767568961f10a5272650566de27ad3416c2d453c74be29dfc2cd2c4e4cb -size 797040 +oid sha256:0fc17e53fdb4eba34ef582ddfedfc36e0a0872990228d89ccd1e852511ff009e +size 565552 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png index c6205788aebdf4293f0b6df4b8cd4ea8a420ecff..b6dc0eb69c9d6094c9ed41b09ebedcaf100a9206 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de6a6c398c26a8b0c9dfa6a355f0107cfa07239ba57cd2ec6f2d62567670307c -size 1112062 +oid sha256:0fcb71fa10705c2b0f66688c2e885697afc1f34c469357d2b65090508aa2d41c +size 684221 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png index f77c14ae626ecec98d2712bcfe4bdeb71c4e156a..1a6d267258feb99c7b55857ef50484dde947df68 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e52481d26ac96588a43b5229b21f54b2043f280168dbe7f9c5fc74d8737c1353 -size 1109355 +oid sha256:de3d85544748dcf1ed182d045cc6030085831dd0758eb73dd02aca623aa67753 +size 736050 diff --git 
a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png index 1828f7a2a4ce13b375bdd24434bca3e4c3026df5..9f8331f2933ce371c4a7098a2b9af2e56b7bdc82 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6757abd4a4b38af5608e5fd4d82aa39a7c85b5a25f03b32e65278c1e7eb3f89 -size 1097709 +oid sha256:5f2a684cd760ae7b890035b5c82539fdf3c064eb37c08a7c8350820bb1171a93 +size 714784 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png index 919646c4696329293e24d1ee1c37e26c0e2b30cf..899e14c73f40366e38dde05cba485938678465d6 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79a2324d7c73117650737d62095d0a936981f7c04c45dac305e1a7621a73f322 -size 1321944 +oid sha256:830a1cb134bc5e7d7caa236b99b74c441ade9c8c8d7b20aea035bbcd67984b38 +size 1787319 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png index a08b13106db52377ae0e601db8fe23dc657db55d..43551e4fbca4bde50eda30a67b219d32a10763e6 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36df0d691ad978b25893b3ea202574132ca63c39d9d0bf64418746c212a655dd -size 1147898 +oid sha256:6df858c38906250eb089bb7131b31ca0e6fdc0e1abd96bb6e9f46fa939073bab +size 1192968 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png index 371b9192813b630d1ff50963f08742ba308ea42b..f3784dfa87ac8731cb066b7aa7c1bccc6bd4d508 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb4ec91c1b3d1e29067febd39c007b36bfb65b4c53ac9e1f4925d510d6a667b2 -size 1131880 +oid sha256:6e084e13532c8cecb99ef2e0d258b5b9091b6a799c7de7f5c578d132ecbbd83a +size 702563 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png index d0a2f64b2890a5180b6ac23da929bee3e46a49cd..0873f0a4aae7a389ae5d3765453c1f6487f0068d 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21de877c62d3d323c5ff6fc2387d1efd99fcb642bfe4d374f617a0322b06f184 -size 781940 +oid sha256:fd46b1cf8481dc3dc1152dd122bdde448f81b2380ef3c2d1a3363ef8f0aa559c +size 636155 diff --git 
a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png index 2f7ca7befc86b378babff65f7312c199f8c74041..653a6a71d615595b4c29f15d77a676cf2b09aab8 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e65fe5335a2674103c6007f216578025161785cfba42f34342f51d01da5333e -size 993079 +oid sha256:7225ef40a4ee5243e29df5c8d8c1c484404b6a0b4f0a08e259d9d390baa7e29f +size 566574 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png index ce5c20fcb0e7da79dab2070a21b24dc895682966..434093c3e516860b489dc8cd48d24c8a545d7f2e 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f643fdfd90476a5e29e2c1ee1ce7822599a6cdfe5e16d39df2f72a2345b368c0 -size 1496186 +oid sha256:4bfb953286b0f4612c8ef5c12de46b752876b29842dff0253676a51eeda75e3f +size 1972025 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png index a9e80cbc7f7aea27cefa3e8e392c4813e682c1b9..196031bcdfdb19b087a4d6fa5b1ee690e4869001 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bef49ead7e799128cbfd473935ace514a1b5000130d1b41e032ebaa21b2d0a42 -size 1128291 +oid sha256:51dcb40cde100d1179b20763928201a4a8ac912334a8ade33d62c777865fcf58 +size 1023881 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png index 152ae3e3dbbe4daa9adfbfa0c8d6e9d2525895ad..9c6b1629719edd5d64f9acc7163a2a84db27bea3 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26936c206533c50c2db650d73c9ca61e1ab58e79830b478631c50d2245bc28af -size 779052 +oid sha256:8aac4660f77850aabbae19647874157eb83e812ff11463d1ea92c2db9010ea0c +size 795135 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png index 883fe277d951f209bf799aa3b4b6976b066e15b3..e14f4eeb18c720572a9fb943d661053ff2127e11 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa56547f714ec67221d56d3acc508e24f90852939d8cd3600a9152ec0bb2178f -size 768463 +oid sha256:01cb97ad7353b71b58a24592c68e17b8d283844da824de4a584791ee8e6ea067 +size 534603 diff --git 
a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png index 1f83da9c9dfb70a38bebd8eb6b6e023ce6073cea..29f09acd94f5e23d970ab62b13d224287bc1f2be 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b61b4d54cc43e2677c8f4a40a12bbb84104850f2689d82298795b7442d74b9f0 -size 1018418 +oid sha256:037c47bb4ac000df0822948aae984313a8da05d5de0b8e4007ce1a36d60690fc +size 1136485 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png index 0cf2b5a3a0d827aaa2b4d40ef7e141228311e826..98e3beeab45e2395ee4ba8bb8bd513b0609d0e57 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15af05d7368880b2c8ea3fab18276c19012f042b210aab97e8da9b95f1666fae -size 748965 +oid sha256:e6fd14bddb92e80b81fdae3f4f6952b522f6d6b7ed21996f354649627bb3b1fd +size 769043 diff --git a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png index a4f61aa0287ff5e182770f291e25b1a17968b721..916c968e8d57143b11910f3c6998aebc18159efb 100644 --- a/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png +++ b/images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57dedd8effdd732b25a8a102455ab0bbb17817b76d255ff7ef7051c4b8713a6a -size 1158929 +oid sha256:4eaefa0e228cadda87a177e2320f4dc91a8c8412d30d42ef010c75bbf3b8b507 +size 647593 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png index d42a9be7056aaa4b1e941f137b9348238cfeff59..be3fafc872426721b2dca2d5c10d256625e53bd8 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:852da9a5e0261ff070e2e9e4a29536b681974e85e4d38654796eeb97091c961d -size 1299536 +oid sha256:50ccefcde99ed8117ff17c1f8082ad4d0fab5b694d467d38a2ff57db4668a9f7 +size 310916 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png index 9fd9a6f1aaf5a47ccc98f87c165a9ec0e1e2fccb..6cd18ece04651b1ceedce4ea91ae153ef1f04682 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb037a18fef2eee99e4ca19abf1a8cbe05ea86fe3758d91b49881378a2894ea0 -size 501567 +oid sha256:cb8b6aea9f69b782ade19c26d27580eca67e2815c15a7eaf7866e3d213591095 +size 467113 diff --git 
a/images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png index e87bdb67fa765257f5beba83e9f2700d2e84c796..749c3cbf42c9ffdf4553b0784ab92300eb622f32 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:013af6f6fe494b1863665b5a456413a72c3e6f0324898f8cf289433fc426efe2 -size 465869 +oid sha256:a7b673fff1c8c4381a5b88386d0173476ac3163ae1375a427aea56e1520855b6 +size 319331 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png index 2d8785b438878b90b5a9958438c3de404478aeb7..824eef02dbe092dd2b606eef2650f86364d3f942 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f4bc3f1962f4e8c5ac2619e2b461f996109ca095716a90e34e024a825721b80 -size 2084360 +oid sha256:383cc2985975a0a21d06a37d2204013757ac386074917c3a346c7a9912186d7c +size 686477 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png index ec6436fe5b8a67c91b1cb35e31830c0efedb6a5b..d084b1ac09f247967a4c0f41d9efa7bb7c52a77f 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5afb72364d780e745e0068f98911eb305544ff90e210036d11b4176026d6f478 -size 569471 +oid sha256:e29b8105587f26f372af89a49715fe3972abd2ce252876f35c4dd292ab3b8023 +size 708596 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png index 3db97118c99d0a2acf0ce63ae99e03e7366418bc..c03ca2c71f31821aec722046d9c4e4bf7443f3f7 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6aac0872a4beff8909d9e67af8fdd5cf984438db103f7e8245dd3ed0b8ca8d9 -size 2052017 +oid sha256:933ea9821236df6a67deaac21a2213890cfa23dde1dcf4710f813ae8de32a723 +size 694968 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png index 85a9aaa8637ffaae7306f41f7ba234f3ebf2b3a0..9f837a2627e7ec0e4d458abee7aac41a300ebc86 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9b0a2ddb3c6209f23309035fe085a7dcdf791f499c9fd9dac104ea764780019 -size 501452 +oid sha256:83a555ca25faf3ab6fa041e696e23b2401e10286cdff30b251347e9c3adbd5f1 +size 945273 diff --git 
a/images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png index 11b6e192877d7283276ca980214f02a23b1a2873..c1c5d8b8af2daf39d2b2abbe0b345fcf699930ba 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5055fef7ce6e68b546ccc5058b112703dd678989407cedaf72ca53c7f628b931 -size 683367 +oid sha256:078d1d0308bdbc9beb9d6dadf4db82c4e834fcec56c5ed20f1d830d244b96a0e +size 487884 diff --git a/images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png b/images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png index 18a5f247e4fea4590af05ced4a1fea4ea21f6042..a97be76faca472bc27af3af88b0b36f749338a60 100644 --- a/images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png +++ b/images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5ae37ac51028c808a3868dfb6891b357a736e131b47624d6d18d5b6b77b73a2 -size 2150445 +oid sha256:080ef7bf407722bf860f2c0a84c5a13a6bd559d97ecdf48bb516b9f1388667e3 +size 881407 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png index 66943ea6d7553242f2812efd028818be696e782e..994c3bf63a9161646ae30c4a183bfd6b80d7a261 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41a5feff569d3a59f29b8d52f9be959fe4f5c8bbb10846cc1c543714feab2b9b -size 807161 +oid sha256:b9258b7ab9e88ffddc07c9382f62fd71c69a7d08e21bbd3151c3453b1d6686f5 +size 691257 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png index 634c58e539d4030112740dd7e0fe32db4ce06307..e05c2f879dd56926e1c08e77a3173ce6fd53822b 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e3486bfad8326f97734a4b66c46aedaefc3b812976cdb55429f8b4f4ab45968 -size 2052527 +oid sha256:83017ad9e35022c877cd6fb224d8557d92e25543d36cf39ca9345ff22d0e9e28 +size 1439352 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png index 22dfab5d9b416425472833f14e97acacd6a71d86..c128604c445c910a4b41ab8b07eea612ac0c012f 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19f7445bd0c25e878276a549f7fe752326ede7bad76f84e20e4561ba601bc14c -size 1051185 +oid sha256:5007714da08b56f17ae4aac0e85100ea4232a84b96953af3a9b40c48f8ac7f27 +size 973129 diff --git 
a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png index 9633cacda87c05c4f8fecfff3f7de7abdbcf5c9a..9d3b12b6da32dfa3db15b2e7a27dcb3c5da0881a 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4fdc613e44553e9c4f602afdc5cd2065de63156e07a64a1974133d6630f4a0c -size 620882 +oid sha256:019f9e42db893919d33a52da07b574dd9273198a9c3e8535d4d3151f80e9f2a1 +size 535112 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png index 82fbd515e891063b6e21b78c5b27014dc9360f9d..c4b827d65e03a1220590f8c195bc3e5eee8cc54c 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a101a624877769202de05f8ab224f1e30e8c5e41fa1e54bf69d1315200ff4de -size 661469 +oid sha256:3fe7cf19be23de834884eb828c99efb8b852a827ef73061515f5443089d32e6c +size 775825 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png index e6610505798177b27a6feafb563fc6522599498b..0858f244f9bbc182ed9d45717a748f9dd0f0421a 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93ed627588736bc095e182169e76f0a3e38e9d7b7be0f14803a60f3cd991fe72 -size 931043 +oid sha256:92266cbfd0c72c4ea03bec887968ce5c3c3b7d4ba5c945c849670ab16b6e5930 +size 836696 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png index 1891901f33e45b7b9ae528049e55d16eed044366..e5dcb2cc98d29d8c23081c88aa3b57534cd21cf0 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc66f831a8b2e7271bb784be40aeab46a879e079daafca9f3ef42e72769c6c5a -size 730290 +oid sha256:ad53dcf55adbceb159e96bda769d0177e6e2cf29ba34a51741768e3c64dbaabf +size 867153 diff --git a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png index eb49832580d34072c87f4d6e4b0d0ac7f8c26ce7..18e6b5c8b95d58f5757e668cad945e5dc8b42afc 100644 --- a/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png +++ b/images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d0fa617f3ce19d426f1f51371fff5d1de0748e6eae4d4ae7d139a8468341941 -size 551262 +oid sha256:4829de792de9b8043e743ed42ab04c8b75d953bf2f39f5b1345ed043b6153287 +size 618721 diff --git 
a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png index 69364617e42003d375fb22d0422d37a6888ee195..29512245b2242de54049c7638daf5f720645441b 100644 --- a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png +++ b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6861439cbcef576d4e6afab7e1085cf622ed84afdaceb840647444a324c25af0 -size 2346203 +oid sha256:1058008d1dcf867f83e36a73336e27ca98f486fbfd504a7b83f87c68b987f5cb +size 1248331 diff --git a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png index 61bcd586c9805008b19d136e184e40fd7edf7c70..408de6ca411907b64e129301724a9805e8175029 100644 --- a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png +++ b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:651480ab2317fae34f3bacf2c890c1ad3b5582239b40f6b55eea4c20bfe57946 -size 354592 +oid sha256:36f8a649dd2e0b84af9c55a63373b52cd2727445c1106c95b1187434775bc31d +size 354418 diff --git a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png index 8deb4e30cc7acbfa0d5111a3b35e17882ed5a85b..bcf07bf6fcb357499a30dc8929d2e3a30dfab86c 100644 --- a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png +++ b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac00782c3b1d1750a836210e4f5a9e13bf0d29214e790942385a48555050ce62 -size 357745 +oid sha256:2a1c5ad5c5f788735ff9cabb31c7e8ab145002d9f51ccd1c38383956a3b6b03b +size 350160 diff --git a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png index e52ee1e8788aefd8cc668be582bc3021856a756c..46444cd17d4003563bc4b2906f68ab02f6b18c78 100644 --- a/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png +++ b/images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a585282c7463fd42fc821f019bbb6308ad8704141451d88e2d25e76a85ce6f8e -size 390797 +oid sha256:c73aac82845f54995ed43f828062c616e0c449b3d441affc941faa26d9207814 +size 388295 diff --git a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png index fccbf769ec15ac8bd62544ff70beeaecbd76519a..6f1e7f6772ad4bf9d99117bf228810f9f18eb833 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19edf5de1d78b55b9782e08841776ca980ec52abd90dbca1aa70b2a91e399889 -size 1251864 +oid sha256:35ba3d95a847ac6e29c497422131b67cfd42eda4935e9d10ceb591d69181fd16 +size 1331518 diff --git 
a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png index d2b280046f6ca18a98ddfeff57fcff933b340805..2913b4c0a3f88819a0698bf838ce4d22a1e4c2c7 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4dd186e693c72ce989cff782b5e93da5d1898c89d81a208c2a563bd18f13a4ea -size 954514 +oid sha256:dcae8d19f26f6f3e42174f46d3f21ac4c9cdc2e01d66a0df3fe2b3e404c3974d +size 942241 diff --git a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png index d833c3455f60f4081ebb59e90ff23e705bebbc31..2d4dc6714ea237d7b9f7b7c7525f2a88573c6b25 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0aef32e395bcb083b26941cc35617dd021cfd5d7e7e00e35d78ce8f9107bfb10 -size 1574326 +oid sha256:bb699184c5cbb244b5cad2d2e1d435dd8784499073303d4013f61d4bbd45007f +size 1673040 diff --git a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png index 9160dfab03caee57932fe046d646ac9114dd0ae4..72ae357de533c5240f9ec019ce06129411e2a02b 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd8163e25601dc2472fd8d38bfd58e3324efe753e4851e43be9afbbe0037abf1 -size 989531 +oid sha256:d9e3dfce7fae0ca868cf2eff9a6edd99f5e5475e52badfe54f1cc42d750b0155 +size 1026669 diff --git a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png index 054b45f59711bf27abb607f0b677e06c5347bf00..73b20d6defa76941893c9a4a2cc8470ca9fbf559 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:007e082461d3ff5b9c12137a915dc81f47a8fe454363a11d05374a9ec4d97e0a -size 1191982 +oid sha256:86492a3acbf6b8c215a1dfcd441fb80cd036f41dda096fb3e17c1064c9ef7ed6 +size 1043537 diff --git a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png index 809c1375f3377da4a46b9fd39cdc1aace4563194..1f4001efb09f33c8ae7e8553a9dd171a8b2b0ca1 100644 --- a/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png +++ b/images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b928b6f194509d56d01f59db553d68364b1e6697a2ab34bc7a749c933630992f -size 1141384 +oid sha256:6fc50ca1c38d8f75903e437af876057456695d0820b81dd610e63c8d19eeef1d +size 1103271 diff --git 
a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png index 2dc54b6d1ab3778029b47b573b2d482e2edf4528..a1a8fdcb6d56b83849a88717d1124d5b27a9ba7a 100644 --- a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png +++ b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e7e7f09ed2ee014fbba4a29ae3cf1f8f7af6a303039ec665312d074d5c0c2f6 -size 973656 +oid sha256:9786f334b99e0c1aa8481c94fdc17271896444f21d6811825bfe7533244939e5 +size 1231116 diff --git a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png index 31e7b6e2533e4ece32092776ddeac776a02d122a..bf25620f09cbe50f525cf04623d3f858084aa0a8 100644 --- a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png +++ b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af0ea0c1ea725e38bad0552ee9243bf3df3b19ebc29e6a8ca8c66c242d271ab5 -size 785038 +oid sha256:1c8c6b2c3915a841916f0f7db3aa259f1338be1a55a2a25bcd3caccaa964bcf4 +size 573460 diff --git a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png index 429875f14ae2d23d28785a5c5d1c99e0a1729d36..ca811f87815f407b62775d246da39ad8017fb20c 100644 --- a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png +++ b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68726327937af719705c29c0642599ae303a0386518386efa3a7edc596a7b79e -size 615575 +oid sha256:01458e921d5f9d268c0391d84ac86c20d77c65dd1e6a571e9e8db7a3e196f759 +size 495317 diff --git a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png index fc0635146951b2f0cfd4dc4031528665cc94ec3a..7e8765b8ba9a459c19d74e991b7d6679ca7a65b1 100644 --- a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png +++ b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1acb18f433f00554d0f6d9242eb919649d464121a7b3ec0e6f48ff09c5b0733e -size 896724 +oid sha256:0012a46e775efaa4f7e22ef01f2ab8429ef44b96e92f37d08624f2e4004347f1 +size 511828 diff --git a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png index 5f25052457ed9b4511e962dd33e51d49c65a2059..44ab03a2cca41178bb0005703ff2a21cb8c8d04e 100644 --- a/images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png +++ b/images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3522e2b8a955090df9f4ee66848013c541fe28fa63bb71a073c300ef7deb6ec -size 880096 +oid sha256:86611bc738f7f774406e1c2baa2edb28b92b51db70fe3eb48c594ec1b158dbc2 +size 909890 diff --git 
a/images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png b/images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png index bff893efcf100492d96c5911acc66f60191445e0..4cf888c107299e4fee3c79e4899819d3104c1387 100644 --- a/images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png +++ b/images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:869bcd94fb59272daa34503b1d42f6a72ff63f2845d84f294187e1513100bbc0 -size 1231155 +oid sha256:33d66fe481c716b5f6df3543ee88161293334b77126c262047a8a2ae56a1718b +size 667995 diff --git a/images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png b/images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png index 6fe637f6d13a3aa7be00b0f8a7131f31f8747f18..2edd596b4565fa169abff166d7ef83435df07059 100644 --- a/images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png +++ b/images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43af708e213b532a978286c5e9c94c13dafde0d9e778ba1c94b17e19b2c61d81 -size 1235781 +oid sha256:4f6d933504d4d0f0682dfab17c7048d427752a0f81050b8137e4b383dad0cf81 +size 1046568 diff --git a/images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png b/images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png index 411de1c41def430b434172d3a0584c79c58b0b03..85bc7591df7e221dd89024138dda81b90e885c95 100644 --- a/images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png +++ b/images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7d25f2db63ee9833bdf9b1551019e224f311b64fd2c345865045efed72b3ef5 -size 585948 +oid sha256:f15c00442762fbeb3020a152b0d8482b94900c5b159f4e60d287d8ef12497aed +size 647953 diff --git a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png index 829a9bdc57930365659e6056c0023d682473cbb9..01ff0a5ec65d8f667c7dd3643a16f9ef15b3dfd6 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e4727b5641b50e8bb564cbf9cf2c448f2979b11c47fa4e8bdfdb0d350735c48 -size 1720214 +oid sha256:a2b65f14babc127b8676f61957f4c88f1b5b59c148941d1a0cf7f8680b8d736a +size 2336983 diff --git a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png index 1cb2bd2cbbc792046e66423663e92eaa8ebf8119..c428dfa18724a07aa620e32969e51c65aa2602f5 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8db5b36dbf7a7098fe31a9471f1297076da7aab2c4f21644de34bbdcd47ef1f9 -size 3297432 +oid sha256:97208869a03853e6b705d8696cf9bb43e06f49be6f1e19c12deee8cc4bdab640 +size 2573208 diff --git 
a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png index d9597e70d2baf33dd43088aa4706f46e36106d58..00296058684925d6952ecc2480dc2fea154cf907 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e6710690a3d168fbd33ecfa07af236432306dfa20857817f160400c43ee992f -size 3182409 +oid sha256:e28a9409c8afc1cd254e51b256fa2519e8eff2088a7fd88c7fd94607d783ce6e +size 2333721 diff --git a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png index 1270e520acebdd99162593de8e31504b9d48e872..1c59b53dcda4d7fd95af9e5a27d9e313e2b1fba3 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed3b817a7a7fa78329526a4be593b58d3408cccc3e12cce5e9caf9b2d07c0541 -size 980809 +oid sha256:977d45e1bec2ac253b7e0963ff68e35a884601b688ae1a24449adb7a1367c963 +size 1521666 diff --git a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png index c7997dc466d1fff46b39fc5bcb1d986fa05471c7..ec57bb798af2e3b0cf9dfe690adaa9bb25570453 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85bb2450f525302064de1d54157577a72d28a9b5d5d1fdcabd940aa237c152a0 -size 2464308 +oid sha256:65402ea14e2a68328de6289bbbc4b1d7038887d5ec7950c46f81f3cef96a10e3 +size 1860598 diff --git a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png index 316fc844cbc9dfd470d0bb7e0175c77df77049c0..cc647f3bc7fd15aa28dd0f148f858f5b43f0cb0f 100644 --- a/images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png +++ b/images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2e9892ef63b0f00e192bdd9c384c4b62037ed3c4373385769cbc4f9da913390 -size 1728014 +oid sha256:c9a711c194f196befc07c55b24c323982f3fd75ddbc9acafdc2e3d821613d53d +size 1515046 diff --git a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png index d598e641ecf41592eacb17153c2dd75ad0db4724..0acbea0adaa8b34d846e36303228b7a61724f73a 100644 --- a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png +++ b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00b9c0aaef3f8ae89b40d5728838400920e6e5c7f7e99bd3119e814f1538c14e -size 651201 +oid sha256:79f973d9a896bbb4dbaf2277395940223416a97aede63d9adf94fe98e45f7154 +size 813435 diff --git 
a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png index a4dd85484387cd2996dccb375bcff4b25294d342..73bdb123d3d0778c4b02cabefb678a3f2173e2c6 100644 --- a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png +++ b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2639c6eed81a8e23ca8d338d7e3e44c19fb2f0a92ed8c2eefbadb86db67ed77a -size 691432 +oid sha256:d233225e669d6bd6dcd72572e438ffbce4695d587d92058d37b669a60cb1eb36 +size 1440602 diff --git a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png index ed8bccd486b8b94465cd2c30f0de3ce31d2ec45e..7f1391c86c7dedde0f1a99492927c6901a69dd15 100644 --- a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png +++ b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6723a139503a36ac663410f2ed549e47569b03f088e146b9444f6ca5f6febd54 -size 529698 +oid sha256:38e7ae793d3e9467127b6faaeb479b159e4e1a02849a03a86cadcd0d7b073a03 +size 514627 diff --git a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png index 0c0d4fd7b554d514a51fff27301739d437a624c4..a1a194d2a6710960ed886cb3b49b5759d37ebca2 100644 --- a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png +++ b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a1767413085ccc60bf2d7bc30d8fa69839570120ca315d14f110c211694a5cd -size 632045 +oid sha256:f7b945dd9b7847dfde55dd5bbd5e0375429827222d76858368cb9ff35a53f491 +size 996601 diff --git a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png index 21211cff44ef3fbd4e6f504c6b55b21bc930f615..28ace62ba1d7d040a3a9fbe2c0bd50f9139572de 100644 --- a/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png +++ b/images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5e9dcca18374d6248e2723f40643ef37608a4a911d29dc6adf756b266ea3fa7 -size 821448 +oid sha256:57509d3a6517044a7640145026107a359754efc58762d975ca607ed3d69f3c55 +size 1329335 diff --git a/images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png index 9d2362160759a6c0e454f49a45d0658fcabd386b..7ef3084d29ba744cb5be884a569975446229bf5f 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c63dfcc9d44921606d2c7ffdcd11471a4f313794963a337ffcc6f6801a366f8 -size 1467230 +oid sha256:3690f9802b0d18beb3db9da7a03ffeb833b7a2d1fa704e4981e3fbea58e03829 +size 3249932 diff --git 
a/images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png index 7390c610574c3ea1012389a962a0f43d458af23b..6b1754cd4f1c570dad0c4b5f46d267f18238d7db 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41ffec204194475e87694755a48f4085419d7170a8a09fe446a67a92e41d0dff -size 1968254 +oid sha256:cf1e26fe74043b4d3281861ae94df1edc11489e50c54832b4a4c2fff58be4a4b +size 3558718 diff --git a/images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png index 390a86ab1a476f493328206ab6b3e5018273fd19..3e6955e959d23cf26ac0e0f6b54db0263906e86e 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d8a820522462ba7a916ba9aef866860935dedce04d910ae1496373b94db2acc -size 1503961 +oid sha256:8d9e5537231393a5afab93746a5a217e105aeafca2bf722d63b2e2b29477509d +size 2564846 diff --git a/images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png index c3d7e99cfaa3dec4b7249c2a6343201d17c67bae..beab1c16e324c45dfb7c0d73187ecb1e644b5003 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f09c615f00525a32534dc8e0d65f6c065b310e0d733c836d9cd4dc8354221c46 -size 1276190 +oid sha256:104d2eb6bfc3b789db123e765b0c40fda86b100c5288e35af4cbfb7550ecfbc1 +size 2900832 diff --git a/images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png index 7df9d8884864c77129d2481aac24ee66bcdb919e..ca28f3bfc917790ead73ca184f0dadd54d8f8276 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e86060b84a79c2ae294742ffb61e8992118bcfb900dbbb2c9a5fa7c6a64d400f -size 1464705 +oid sha256:f0cbcd4035bd177d88fe9205851e8d3cb1e67f1dab4333e7075899f6cff1f3db +size 2540110 diff --git a/images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png b/images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png index 88d3844fd43acee1d706296756b5fb44f5222ccd..59a92b116dd7a3479b96ab042f85dddfc3140abb 100644 --- a/images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png +++ b/images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5becc4b6b9afb093c7488ba329310984a502935f2154c470defea7b4c1d20be4 -size 1039576 +oid sha256:9fab9cf9e6280bbdb212e1da4ef77db1e95ec467843e7df2d6bc3653dd533918 +size 2027769 diff --git 
a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png index 3df60bc73cecabd001031e205d2efe028fa3805e..2c5ffa339ef0fd6dc991900017ffb649f8dc5e6e 100644 --- a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png +++ b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f12f71eb36d952e843ed29cfa51490da1f775eb6b07f2771bc6a54ba028c397a -size 895880 +oid sha256:639a3454ce107aa5d994adaf8d025d2237f8350c83651e6153b6c1d98ba1883e +size 1111007 diff --git a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png index 0fce1435acbc530023acac0f95931bc2d1cd11d8..9469a9ef9218d692805b61e97c24fa166a9e1154 100644 --- a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png +++ b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f55d53d7f49074be9e8992e35fc1bf3aa0352a9a16487cc209093123a7dbe14c -size 897333 +oid sha256:92aa0b8ca0b09ce950120511983c621d4a7b6b02417e3b8543de56f1311efe5d +size 1151534 diff --git a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png index b68d176d1663247d01f6a6b5c6780e4c0c6de27a..b7de6240d7fc7a85edd68b5bbb2d40d59eeded25 100644 --- a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png +++ b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1f87940fe92db6bd1f7350f44c9c8aae0397fa07ac1767f4bd49fdf030e2844 -size 955450 +oid sha256:d99e3650f119931470e9d2f8fcc377ba033ca67a6f7718d2dfe08a1e66f8476b +size 1170252 diff --git a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png index 88b70d367fcb587cfbaaa342cc1c5efd36f0f9bf..b1d4214f98d05055876579df8fb3009301fe54cd 100644 --- a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png +++ b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6a5c439ccc903cb3522119fecc2685c502c4ce69c772f9cec02f3402ef58036 -size 945060 +oid sha256:10158d03facb17b7c422d41916ecd537ac65456b0c55dbc20906262dded067d3 +size 1349255 diff --git a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png index b2ba3cad512f71298e57e73bdf0e461230af9076..c47058418223997a1c925ce6d26b4522908ab688 100644 --- a/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png +++ b/images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e0df2d496a256e371f6d5d31992f8c46a6d385f28036e3f4516eca257ce84b9 -size 966641 +oid sha256:eea20204cd44af11d5f1acad448bfa911c174694691a051f109bdbe556082398 +size 1180098 diff --git 
a/images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png index 8b64dfd59814398ffc41de73dfc94030f3f4db27..253998f487af0aa9fbe89cb7e99c0e7ef85df79a 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ca34d0a818e11eda5f181fa1adc0590f0afea6ed6bc9aa9fd904d8b2759dfe8 -size 902283 +oid sha256:0c4835e3e07e8e552473ad1577dec324fc5e6e744f24d8f79d36ec1b85340924 +size 922656 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png index 6ead86241d11c38ec40841934ffb3d2f21de1720..6757b36a5feda35f8300112182ba5549781120e8 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a11799481c0ec292d519871841f364acb4686b7551f9822e824e788566f3c3ce -size 1103267 +oid sha256:645545aeb10650109a84ab9ee008bf8ec48878744a72c588fa7fc83621ec801c +size 884531 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png index f9312957e35bf985c17d97aa41bc659fd1da9dbe..66320bf290464a8a4ab7cb280ad9ad0241e1db32 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0784c84aa8fc498d386e37270a59e341b3080a7090f5aec226d8cc0969efd392 -size 765345 +oid sha256:d0b7922fdb2309bd3ee118b1d9212776b95699c74f5cc9c28ce80ad9b4b1f94e +size 573288 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png index bb91285f3cddc759cd14a849281b454e1d0afc1f..d7b330d39191f622de6f3f3d2767492aef152949 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:498e9fe539ab01611e56bec3ed1cdb6070f57eeda6fe333a5d86a36dfa807611 -size 967249 +oid sha256:f30ee4d3eda08dc28a9df5f5a918d6dda37dada0e7c2f3055de3d71d5a44cb84 +size 557551 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png index f6fb4c1a343635f5c856715ef0cb7e5db0a364a4..d6c4a1222b4d42620a1819bc6724c5a21f399e0f 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0fc44b06d986ba51adaad0083c9a152df3028318befca98bfc98cf873e7c1a7 -size 431965 +oid sha256:c46f079d68a1a703a481360176b21499882ca6392017772cd3473a8840710250 +size 93635 diff --git 
a/images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png index 38a8917148e07fd49f00b2545cb1c5e825ac1608..4836a6368f0688131a429fc037d4bcc305738615 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79b4dcb4f2b97a32c0b6080998df110fa777eafd06b90a3312da23f656ac7769 -size 868284 +oid sha256:9feaa5e8495edbfba42e305aaafae9e58fa4cbed9d027bb91536b410242a4709 +size 652814 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png index 7d25f061498a92227c4b451dc401fc304797d7fd..6b7e35cafee2b73bd696e39d83c1d51606782f7e 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f3fab2dceaccfaa87df6981e48745a493c13d884549d259c8cdc8b36f17780f -size 886607 +oid sha256:2c29f290f8e0ce81468b38d3ff768d8f547432980ee27b1cf128a73632aa6222 +size 677004 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png index 935db3a95f83f08a5d253a86ccf41f0c9649b032..d42ef43388c67487fdc9c07864a007f4cff76396 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ce9d988470b8575caedb35befa3ea7049f2a1695f4a0c621385442891855534 -size 1201472 +oid sha256:835b27dd80ef0a8130254175d806bdadaf593b92bb69aab84a81a3edb7f583c5 +size 1177521 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png index 16dd5bfb838575f770bf6cb8e570dcfbf3905630..dc3a4793d363bbeea6a2d0f484020b416013b948 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4eedd8e293dc3d5f339fc170fa051af2ee193ac7bc80389cbd2a6984736aa424 -size 183819 +oid sha256:bd719496135fc7af78f90c0380a802e9bf14ef3e80075e48d15ef0d9c43b4e31 +size 108622 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png index 6926cc804aad9ff3ecd9831f0d9338df695e83ae..d308a24c5b08c592a09864d27a80c3455d3af721 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29183a1eb3832bd42bbe070a75301a6a75b87a8ecf85f178b415b737c6fc12ea -size 805266 +oid sha256:4d09dc3c778f80ff8c65dfa24c1efb27705b28ba9d94ef0b2c921ba95f326c15 +size 504614 diff --git 
a/images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png index 05539a8029d3e4f554a5c4a834f1485b1bd04f79..3e616a2aff66882b01699fc2e87e8adddd2d622b 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97de2df7b0590ae04cd7eefbfd42e6a7bd24372715b4980fd30b4aac9565679a -size 1200268 +oid sha256:4392a7804c2877d43fef5ed90b2533414920ecdef7937f0cdd89aafff9436bf6 +size 1006110 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png index d1afa583851248746a133404efaa74c898e8ada8..8ed3b948dda2c50b9c2b4f568db2316bd33a9f24 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6921ecbb56bf1db01c3bafe9677c3ed0a6c7fd98f6a066041457758711d6e147 -size 982493 +oid sha256:cbf5bbd986e362fb4630eb869bd9cadc8e9b29fea543fa60117236b51eefb0a7 +size 766565 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png index fa08a67b085f641b05639e6b720c0cdcec45a877..09f9e25ba82f48cc5c4462b2663c47d2fc249578 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0b0e7d1121a0ec2f86f735892a29843492bffc5689753a35d9df2c283f3b164 -size 1159932 +oid sha256:135f5e9032005198be4c7347b05e1d8e485dd39c66646993ba5181be53327baf +size 647886 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png index 49e19091f8b5723de7b3d67a282c58393d338ee8..69a55b6c3229ce706bf371784e92ef7de1c39b6f 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e313136a96fc031402248ffd67993a61100b4d1e66376c4e1c782821178961ca -size 86614 +oid sha256:bd3233c1dd3ec011ebadd730beef5655e68cccdae6cb9ce2927f32e01cdd65c1 +size 71587 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png index d559418366527933be3d5f9a684f1035bdcb8084..b263a1e1dc7f8ff8cd1c970ad5c19509fbaf5e9d 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:702a0a4c8cc2730d79fc9372629605d764e6dab07afa9525cf0503c8cd00ae0b -size 860533 +oid sha256:d267047f45fd08fd5654188ea41f3c7e5173c17242a308467042bea16a5f2018 +size 1157500 diff --git 
a/images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png index 45d117e4e9aa0c41d85b88dda1512d3f906a4966..e33491ccfe2ae4c2e5c7775ac3176e5b954982a5 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d72c09f54b8b3844f8f3cf6c5c6f3dfcc35d32ce60e6d486125a1a5305a1705f -size 882344 +oid sha256:aee70f40640ad6e8fbfe3c412b304a3ffef7b8bffb61c5cacd370d1cf795a1fe +size 605884 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png index dba9b3c20eb0897a0e734fcdf38077a9fa73c216..a7be767e6a7506f1a060caee3ed0efed257d1076 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e3d353474fb8a228c5fc898234078f05f117862e05c74936336daab9fdefd36 -size 1082014 +oid sha256:d9b7dd0c09af2e1f881ca5a12c4064605800e4dc94840d90a20d4e95d8cfdd3d +size 586695 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png index b4b94f3c3091be9b1c3b0b4bbbc1f99e0b240173..71e30ac085046170e587a092c791a09d54215175 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afa4bf13095b5bf3e7fe184c376e4d4d80fbdee533f2365668a42f9b107c52e4 -size 769904 +oid sha256:e19bec58439049b06a7c8675747eb275b7d67f86124b85aa1e25ab7e1416ad73 +size 808552 diff --git a/images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png b/images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png index 6854e05983f527bf0afb70ae6390d2755f843f39..f80c79b9e22f6083b964e79fc93f6f9c6f09607e 100644 --- a/images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png +++ b/images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:386dbf664184ec860f3e074b78fc3453f4db8746d9e77d33aed61987e482364c -size 301984 +oid sha256:0baf01da5518f8d069a1aed695810b10bb6cf3f5508c148d3fdb33499c3ed4ff +size 297052 diff --git a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png index 23131e8e60c5f61ab2b181cdeaffc72620c25622..6b3e2c3c4cdabba79627f0666101627e07ac72b3 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:902c16920184bca70ea260fc3fc71a1928601e73a00d46b74341255a94524853 -size 554748 +oid sha256:6c52b6eabdc87a3b6f8091af61ddb30e705f393a044da085315d80e976368799 +size 646623 diff --git 
a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png index e2f2e9402820f411aa9550bfbb90737b859d4ce7..9e5b439354e0a310d5f9621a665a21137be3592f 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca5989f7746eccc7f1aa04b66d19fcdaaef9f47b1d9f92a8893263809b5067cd -size 803097 +oid sha256:71d74bf5d29a833c14d5de504741bd7e430cd5fc370106bafe333aa19dfd75ca +size 803174 diff --git a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png index bd6130604e7a0c6dacdf9496c2e355a079edd122..8121360406950c0a461a0f36468cd4422742a896 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2c978ba74501e020969f7557d0d4f6850813e64359a5f73d826da73343e2e75 -size 789297 +oid sha256:6f3b753d0fc08f4a84078a80982e0d8c49fd0c933a0a06081df2b5f762976890 +size 821437 diff --git a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png index ca3d1c424c10624193db0bb62e575d40dcc20aa4..1c31c48655cde8752fc71f4fcfa66607c1612ce3 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32c7fa2e7fb60354fdf81591a80a66319143e02cc67416c289074a82d326a801 -size 993491 +oid sha256:aceb571bf676193f16a675b78b779a010dec61cf8a44f68603f61d0c6eed78c1 +size 1005930 diff --git a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png index fbab2bc02b2e33928c18d2c1902651aa7fbc75be..248f5bb38a31435434038c8be1ae65647cbe1fb9 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00ed173304e71c479ba44b97d23bd84d8483479128bbc65288deae0cecdbd307 -size 658442 +oid sha256:0d27c793aa63251b690405ced637ff6cda0675e1935309518c9486b2e28d34dd +size 658191 diff --git a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png index f16845ab0a4d50a24b746b18063df0cc5abcaad2..201008e866c70615cbe6200cb7968b2f8feaf3f9 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d608b4b65f0e74e6959c32e088b78c747bc4d01adf540ffaacfb4e4f1aaac3b4 -size 599839 +oid sha256:d02aadcf6726b7bf792cf4b14cf6942009fbad8366faa13cf7ebb03448973c03 +size 599895 diff --git 
a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png index a5c17a8c707bfad678e660c2201ceb3d6c480bef..349ac3dd34890cb1ae6d08aa620331c6d16d8c62 100644 --- a/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png +++ b/images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c482cc972daba340a3744d56ec4d0598542ef00d64eb70ae5e744d7b45fea1ad -size 1044630 +oid sha256:e7f6c921c4f8c2fadf45ea1caa278aea0cf79ee54b8636b26a2c38016377d75a +size 837216 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png index 9c877ca80199da00cdcaa6e7a4e95ec91908f575..48df675ddea7b599bd92069d15d1afd3cd551a12 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44779e7fb33110ee507d0b95f770eabaef105459be67ba45ddbb0e22755a9923 -size 1256176 +oid sha256:7fb1832c86a5e1eb4b806acf4e0d93d8adcc1774dc646d1d82858b3da209829a +size 1432105 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png index d231a52cb4b00c903bbdeb4b5d8ad9150662f68a..26352ecc51b0559c38ce38d41811baffeb7a441d 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e41420c325aee6702f199bcba55d3626ef14f530705cbddbf90ff77c3ec26095 -size 1644957 +oid sha256:741a5c613ac6c9162c23bb88030e8f600bd0a4f5960e05af6dd083eddafde236 +size 1072419 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png index b68fc058aa9a0a1dc9cf9d1174a9868862fee02a..57f9d3eb244b2c97c2d8d2f26e74eaadb513b77f 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7351d620e102ce39fe8e1a8c2d6fc4b6e2ee1fa646eaf8b766c25d257b58e7fe -size 1166022 +oid sha256:a16d8def17d912de5967d45fb3a26980c25c5467afc1e97a5eef0dc5fec9cddd +size 938393 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png index bb81d7ecdcbb3553edf8ba9765c0bf63078bbc28..552b719f5ef353815969e66fff7b5fbfd46a6cea 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c7cd5b40c90a85ab4bd612f0716c0c8e7f3a7363634505619d515ba2c67e145 -size 1221341 +oid sha256:7827db7d3d9b2deb7d2bc34ad64f851c1643307863a2487681e9158d994c2fe1 +size 1310782 diff --git 
a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png index b2f8688a009ebe03f1f74942b4f886a2a8586b54..3221915f807b1b122df05d5a9b1e68c4e856d1f8 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dcbb7589dd675d6ccfc9f43879a9c756e5830f87e1fe7826cce7832fd582c4b -size 1239722 +oid sha256:103e6ed6d9b9a53dd2e7047cc4f55718c3798342a08811ebacfb7623e5c1ef8f +size 1117880 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png index a856093795b43a8e3e2c279e6e64f5bab890b49c..e7b5cc0f81f961c778df82217f78307d13358967 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:135947df6dd91aa1ef6be5dd1443efb9faaf6402e557c58622d683b4d50fa7ed -size 1173013 +oid sha256:78a3e140ccd781a79670a587db9dd6fb96ac2bcb75a61717abf391511a7c1466 +size 1767352 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png index db16e5d1215145079a78e7a7a66039cb3ac0b695..3e6cecdc73cbd76b8fca1347ce8b92168b9e3d1b 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38ba2a46d6298a301e661aff4daa2f06dc33026a9e71524c0a190b5136db91cf -size 1168849 +oid sha256:2d4db7d8a23bdc29ed4751335a8f9ee1e82fa60710509fb8a39f7904a0895ed3 +size 1410294 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png index 296bd66787f93b71c6146e24d451e64c2f6855f0..75bd95c507f0b0625659afe6d8a8b7bd78ee8263 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebd753e374ad6f79d05fa9fe2a4200861fcc77570d2b5066bbb155079f5127ca -size 1030453 +oid sha256:524f12529d0bcd5cdc5efa95da5a2d1d8091b4676dd242bfbae721dd8bfb53f7 +size 1223323 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png index e6a076ef0aa3ba843bc727f9f3a8002f8189ad3f..b960ca961f0ecd5c9e5f27e1cb16449e4a41e749 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db9f2fa37449859aaea11e1e1ca36f4f639d20e97ae518ee098b4537a8238316 -size 1130103 +oid sha256:af896ee692d11b2a38efa06f5deeac4c08957586a2a078b1815e069d76896bd6 +size 1188100 diff --git 
a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png index 49560c06275ae22a59f48a3e36e7f1458f9726f4..e8bd2d8aa9bdc48fc68ae1aaff6de84d21adb0cf 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6500e5f9d92f9ef31ca0a1fa1c79531827b37b7b8f53e6225fbed5f728630056 -size 1223391 +oid sha256:294676b6e09214eaa1e665c3b532b5f3cacc9d40c037c7322c03e00eeca4e263 +size 687995 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png index 4f729e4fb0326b35f9ccc7825683946b84488d9c..91f770ccf09d49ec7f8e5ca003d14f8c7f5231ad 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e59ba550ef0fa9052a7e1f0ce855a2e731309db2118c8dd195ab06952cfb2a97 -size 1220191 +oid sha256:f3e4c77e6b285a96bddbda36f1ac58b083bc6befeec2e1997022930a0c1f4cb8 +size 1823005 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png index 275dd260c3e35018e477974f2238bab4d4ef356a..16d2c391f81db4e05009827c9e56aa7198d3e63f 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36003d09b9c02a61029f9f45ca7e17914a146a99f69451ee1b615500e8da882b -size 1152369 +oid sha256:c725252313b4db922b26da7ce73a2454635fab683a6e160b3ec3776e9d577ab2 +size 1729694 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png index 015f0cb9b015532378756de788cf8f849c5a4564..57bbb8580f179ba34b3c8717a7cfbd9cf250f6fa 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bed9a88e0df571917b7ed45bbe38d18890c633d72eafdf7820c3fee62bcd72bf -size 1223085 +oid sha256:c056f4244b679db9d51b7cc40d81f82f7c940c9aa66852d364c43fb67cee3df5 +size 1670521 diff --git a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png index 928b1a82d090199226b30e4fc1ce4635d8ab6fbc..2190c31f814af68f2888b298e078314c0d773bb4 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15d67bce71bf6efba5f3fa5f127d57b72024700fafe0eeaaa2900a55e652ae78 -size 1207295 +oid sha256:d7ca7a4b44590db0825c127c0aefce33ef46a48d85f06858b612bad5dfa1d3c8 +size 1152020 diff --git 
a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png index a359178f69f0bcffa68f45b0ad314a18eedfdcca..3f82e186a4e0e925a87c8bfef681bef3393c142c 100644 --- a/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png +++ b/images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:631a1da4532fe90018886d7f838e6b1262e4c0a020ea848dac3d47586fe4985e -size 1218265 +oid sha256:9ec2bac85d7a6b0ca389fd797f8e6c593e071b0302a49e0ec9a681caa693858a +size 1151607 diff --git a/images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png b/images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png index 52bc127e0164158fa72520f2da473a0b47883151..abd78b172311102b5234ad53819c5ef6b0fccaf7 100644 --- a/images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png +++ b/images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bf2e417b0b8078ce301466fbec1d9fca0b73068a3cf5e56ac13b250abc1916d -size 1143959 +oid sha256:4f2bd29192f727b2c1e2c8f18fd5ae72ef3ec66956409cef30dc3b611811312f +size 1185075 diff --git a/images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png b/images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png index ce4c251a4493c2124548a1e62e08952b008580db..db6afec2eba90b340057475099adba5938b5da42 100644 --- a/images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png +++ b/images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e281fb3ad2b908024bf0241e85a390dc0bc18582eb2397cd50dfe80bd4e1fbf -size 1235407 +oid sha256:cb292b31daab7169cadc37854eeef88f04f94afc814a1fcf6e93673e99294ed4 +size 1366768 diff --git a/images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png b/images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png index ae0a33bdd59e82d21b3c62e791e68ce37155e3b2..ce59893facdaada2d76fbae055ae8933ed59d6e2 100644 --- a/images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png +++ b/images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e18b7d6fa66343bdd4bf5e19b8e5c82ac3e89eaa7527a073a90dff241273a1cc -size 1148509 +oid sha256:0267631d2b4f7c88e4fc0c7fa9df61e1439eebe742299a5c3889c1f7f21872b6 +size 1266527 diff --git a/images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png b/images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png index d6bf96af6dd8bb3fe5398d823d15810c6b30ea73..7f61ab580fd1470bdde08c0bfd7bb50e63b3a3c0 100644 --- a/images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png +++ b/images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8009832cf8093fa3fe6be48a37c82ca8b1449bd503eed60db996ca9399594451 -size 882436 +oid sha256:69a98ee3e73df2a2e4e8b3823c773165cec55b3bb2ec6831238497dfa992c235 +size 1013952 diff --git 
a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png index cc33bd6845c08d9334546d7b77d83d8de09ca1d1..dd1af1dd4ff66417e756a7d8af8985cc41bbf4e2 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:161fd1919197b481fa85f00abf82d2b653b49cd3e4d8e5dbadc347451a404e65 -size 1104781 +oid sha256:eb05dedbd3dedbc10aec68512e097224971e9f0aedf5d23714ff4f7cd3ddbf40 +size 1147950 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png index 08358980b8ee1450a512a93cba161254622e8a3f..8d1d353817f06b0431365efd3d910cacadb48209 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c793f610225f14fe407117ffd7e72def0f9d2f3418b3f0a0b869c6c2a51d98ec -size 1867066 +oid sha256:a8291de429dca6bc573a712c258a30e25322976d4ee294f8ea2bef9da6141fcc +size 1952803 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png index fc92a51595684fb4ff1638da3e0b1e80f4175f94..7723b3da187fd1c5a983b3b85198327fea2e3c1b 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:205947b65621d9a279ec79c3f1683f53ad7ad7740b838fb254eb14f6d1ceedbd -size 991693 +oid sha256:acbf30ba7ca02db1bff456db18085bbd716c97e2169573193403ff2ecbec2c4e +size 1164120 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png index 2069e9c95b76d675c51a669ba5c6aab0e543a1a5..eb093a8cd497bd2a753a970b54ca50b0c3d219f0 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4062d51339f5651bd49c3c3b8749520149548478ae40c95017778e81213738f4 -size 855417 +oid sha256:e79829bc05f716cc431b2da3ec487cb6cf56975f9afdc0d4831c6dd8d4794419 +size 504730 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png index e9fb507d534ced2d1de424b8672d67b48d9fa974..168c5a6403666cbf20f54d8d38a9382c9cd2ffc4 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f74da06975a34ff8e3d287efe4d2e14b7183073131827261912e831874e30ab -size 1015220 +oid sha256:724e111b0719136394a5158089100b5751846e5bb4a9ebeed0dfdb50632d7a7d +size 848958 diff --git 
a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png index aa016488013c389f80c862bc26d635d5870a1310..a2173f9dbda9786b406974375e523bb31020bc56 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1a7deabd24384c2667b20d1871f0986f4d32adc20dfb9c9af0e99ae66d6388f -size 1341810 +oid sha256:39e60b4bd3cc25abb38d60eab1f4cd9002dcaf99e150d233f9fad7bc25d4fa6c +size 1032212 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png index ac9b76f061b99361ba7770cf6bc34b0a2982e1e6..327a60cba4e0c8ccb397ea65aaa00e1f06a8cbff 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bd2a0cd56ca4a48e2642312830171ec2faf56325ccc3c47ceaec183bf6d4989 -size 1309910 +oid sha256:3f351a6a631980950b8c86f3e63f7915bb87ccdc7935176ea2a8053b051798f4 +size 1344144 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png index cdc508507e7bae015106bd6b94578b48fb7bdad0..f1e264bd7b19c081d08498290e3378bfa6c2e413 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9af9551221661318e84555ad75b14875835f6ad278975b9b1cf1569c1e9a31b6 -size 868067 +oid sha256:91e401c1847d8b6e1642fa273a9bbb0efee07dc2a531b0d5fe5a832721188a3c +size 756461 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png index 33a563ac6a727c626c1bc5ecaa6216e049ed1b76..9caf2170cbc97744cfd2c038c7cc3dfde9f68833 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b95548a4a31bdf1f506ff03fefb1b8e99fdd9007efd514929489408e1ee7fc2 -size 1864718 +oid sha256:d9c384647d6df0e929d233cab7f75994be23e23d2caa9844ffe5a159e285a1bc +size 1864924 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png index 82c66f6c9fd5059c5f117b4704dabbba079b4094..5417b9b1056a46cbcb60b10b43768fa101394569 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c5565b4a40a0b8c423e239f878cd001f6550a7e0033932757a8d3438171481b -size 1158355 +oid sha256:75f99d9d89799260f6dfd5ae7369b75640cc0337b8424c069f5039ef9ffc3c12 +size 1133289 diff --git 
a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png index 77c910a799ba3f3d7149d5f879703c74a11ed736..adfb222d9023db89e29b97342b55874396f960c7 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd24fac6531417cca7401ade729b38d9c9f2038339dc0dd7eb533a45bb7b008f -size 1119546 +oid sha256:ca65507374797f2dbb2177815fe190a27c47ff2096329dde360332cb80b8531c +size 1102078 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png index 00bf94ba3f2092b6490c08b16186d59a72fd2f0b..9ab6f931a811d4dbf3b3b7da83b30a4c996ca50b 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e69bdb39c4d5e3ab1e9a713733fba41e1823f92c31d0a795a7d63ed92661817e -size 1737885 +oid sha256:2fe8fc68f86f703c5d63f95ce5a4e85dd8d47b975842beb563eafa4874bc5713 +size 1793489 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png index 4edb1fb57c468d2792179fdabf53fbea7b141909..3ec6c0264687b911ce297c697978b1a5bfdb3285 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dff81d2526061d60f38f3d2b4196ace4ff598c5b680d41df9d70826d62be79ec -size 1660502 +oid sha256:f6cd5de5ad786332270b156a2fc5fa05438f1dc30eedf8dead5950faff22a547 +size 1641155 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png index 46b1288147aa9320e90018a7e9db92c638419a59..3de795cb466cd23a00e7f0d38a03229811d02aab 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be75413706a3c392f3c5a13f1ef1840ba388da8b7c47576ca8b261e16cfa7518 -size 934986 +oid sha256:cd57190cf0e735cbac3a911fa7c55074cd280cf76b58d426bdc9ebf3a1037811 +size 669125 diff --git a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png index 098f168ee4e1115c09be774abcd4b239324763ea..cee5a144d220f9182dc0a36dea5b23d3231e1048 100644 --- a/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png +++ b/images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08b64161c2f4131e2fc55ce28018301a6cbf11ba7799f1e459ab18573e391996 -size 912897 +oid sha256:675221d306fe4e99da56b3b4a426f07bf178802805f6a5f872892b13daf788ae +size 1034865 diff --git 
a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png index d88411e775c39f63624e0980d70431766823a0f0..cd4e05572c41dc67280fe3d5f9c5d807fe6975f1 100644 --- a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png +++ b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa0c47462fc099cc1bb9de8e0d971746879e283ecffcb0a8b9b3ca4db9a6ecbe -size 692397 +oid sha256:92a941fb57103ffd054ce6ce8704ca572f6ca8e32ab585f5bb9294af356c197f +size 739084 diff --git a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png index ff199856ccd4f747bc35e1b7634bb4f1ceffdc96..7c9bce6c98606b87061da78dc2288fbdc35f6fe1 100644 --- a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png +++ b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:758c8a192c1b213d444a14b7cbd705dd0c1b70e8ffdd48b59d56227982e135eb -size 2505112 +oid sha256:0c9869da8a7f723ef434f40142456307c02ef3d09af49db635b89f204a92ab8b +size 2477957 diff --git a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png index 299dfa714fd47946144091eb7029119023106723..034e9fe6e2b0f57af7b29731a96ce05624e55f29 100644 --- a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png +++ b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddec84c077984b461469ef420d9a868a72de7bc7784b1ce95b32145796bbfa0d -size 2379413 +oid sha256:1266924e62bb20e3ae051b42052c7225847b7d85245d58b80172802d3abd7671 +size 1742971 diff --git a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png index 09fb2407734bedf40bafcbebcfdb1074d2bbde6f..49b698689c4257b1ee7e34b0d31ccf9104f26390 100644 --- a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png +++ b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5c371e1bcf91f1affa9e0c96799f2d0af838431106ff5ec940ea9af8becb178 -size 587317 +oid sha256:3832ce95e891e6dbd99e6598fb405e7d7209a448e5db890810a8878132f31cda +size 728139 diff --git a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png index f803a2b2b08bafabfc5964d0495020952cc85cf5..aed7c9e813681bc686ff8ec161c7b8ad4a04fbd0 100644 --- a/images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png +++ b/images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e015525c1ff35ddc8a4c59d03dac47ba5de5d476006faeda2df284f83736930f -size 591418 +oid sha256:fd68148362c71d149b318aa9fffb94fa2becaf352be61bff2d159815e0669510 +size 693414 diff --git 
a/images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png b/images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png index 6e227d00c71b07df61c02b167fb3acde685fd79d..812ec5bcd0765f1d715d7b34506adf3a4a1b5cb8 100644 --- a/images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png +++ b/images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9d62a98186d028695c877a512a613993fdc3fc0a386d82de2d12dfd1899ad09 -size 3735993 +oid sha256:495f8c5314a2ae3398f663a1f226fd32b3cf6757bc9f11ae1d12964ee5b50aa4 +size 2123783 diff --git a/images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png b/images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png index 8946ccb8a7a9e083c83414babb2087e6365794eb..541e52846a1e27fd3997795265fccebf66769370 100644 --- a/images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png +++ b/images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:873a132048d40b78dbe5f45201cd910f166281a8023230911fa7fd6dd6a4ae7e -size 468488 +oid sha256:74c0e6cb508879081a5b6658c06a8ab226962a54ca8ee732e2d390e5020e5c42 +size 415122 diff --git a/images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png b/images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png index f84503d447219699a206c0ff8984b4ea97ccf068..790b036522c3698e2a9251c720ad63113d94329b 100644 --- a/images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png +++ b/images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e934ff5ebc556a52d421d610a4985a5046d658823e3d7803cefbe4b568f7799 -size 2660968 +oid sha256:4a129233c1352c333699e03de8bb42ae337a9e15b0d4a9822bad9d604386c0b8 +size 3132609 diff --git a/images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png b/images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png index a70e070055f4667959a635e82c4ea408d929b760..01a3386a449788ecbc7434a30692bf25a49d5967 100644 --- a/images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png +++ b/images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0636002e3d98ea65307e753896b556b30466f4ca1f03a4b9259a05fb1abb37d9 -size 414582 +oid sha256:e41c9d4854c7dbc49fdff656fb3627898a422919eb26726a20f9b312227abd19 +size 427748 diff --git a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png index 4f9c539ac19fac0b1b8d5e9703e0c81fb1d66298..4fbfcee9d2e26542c9fd132fc5a4076bc2d4d7f3 100644 --- a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png +++ b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa76440513fa8528ae07d80d7de04193f64e928f29e3f2e6e069765a5c4df693 -size 935681 +oid sha256:3b4b1da23794c866b0146106221b812464494a8304e03c29a846f3c53c97682d +size 1011855 diff --git 
a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png index 2eb036f5c7bae5ada9e55734f70662e5a0688c03..37aa013158e34c973648a9b455bd90fd76e00028 100644 --- a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png +++ b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3d246d0da1aaa31f42a10279b46155167aedb11d800de9620bd2a690891452e -size 1196456 +oid sha256:0c1261abad09adb9c473f44ea1f5eeb54064d7c53a7afc997115d5caf4ef2de9 +size 691648 diff --git a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png index 9510271718c48b53a8e9ce55fbcf74071a704367..510db5781670f1aab3acc10911d48c8d9ef8bb3c 100644 --- a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png +++ b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e495485ca8e679c6c5b8744d29b5278413bbb35c9760579466e4a925a90ad5ae -size 1487785 +oid sha256:7a018e7878fab385b2b046f0c9414e84207cbd19357a2604abdc898955422db0 +size 1469707 diff --git a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png index 191784e42d3a34e856c1dc6b6ed7aa69f15d5610..338c4aebc11567f2f789b14fe1d1cf57a9361863 100644 --- a/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png +++ b/images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b97b9557167073e92fc805d4bc0fe1f9da7250bf42a00061366d13a37b1d1fc5 -size 1658261 +oid sha256:4abfe6c4d51139d55ff1050e52e47495cb0a60d32a60490dff64fe58ecb0dec5 +size 451193 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png b/images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png index 5a6739d276cf315544394f832e363b17df82277d..fde08bb16a509bb7ae67b0d103c52e3ee610ce7e 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb319e7f170e972a0af5cef72f898f7e0b3473ab65bd28173df71c525d7a05df -size 1134341 +oid sha256:480b4534b25903b590d7ee6b2eeb4106a5ff41396e471acfb95d45e0933a8ebc +size 1480513 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png b/images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png index edbae4d483bc0ff93a8e3d0668d02d826af60074..26e8d64e4e910747a7b7038f3b36a881eb8baab6 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:498929bab5f8298874a9d3633af0510b8eb3a4ea8b01aecf43e61bf28d3cf397 -size 1070490 +oid sha256:2932d6200e4b58f8965d0ce815a84bb3fa7cdb78f01f98040caeb9bd9331451f +size 1547550 diff --git 
a/images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png b/images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png index 68c17591277b80629da8671bb2915ea8f8fa4c84..e862e4fcb02b273ee9bab20d84e9580315959dc2 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b621a4dbedc201be91b888c2279afb545d559522c297da29dae96ea064bec6e -size 521941 +oid sha256:414d191bda0cecee293e4ebff12d315e60ad969c2853aeb69ea90a0ea8be0352 +size 875855 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png b/images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png index b9aed36f7d80a7042ac5d8610b06f5f61ee8e783..ad9ada539966359111283ed2ee145f1a262a10f6 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86140d312ec9eda89f429e47fdafb5111a630b64c9ad90b24aea9b4dc333d8c7 -size 1382937 +oid sha256:20f6474f1f373f493805bef4f7cd28c481579fce96d00b3245cb1d0c8587747d +size 1054200 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png b/images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png index 7c6a8f4d7b02c491be3a1d0008788d56039c70cc..f2abdd111496768a1867d3ed066786d80aa7c3db 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:380a6514d8963271d92f367c8f886c5144c4932723f9dc26fd268eb40d2a9654 -size 1372418 +oid sha256:0fbf94a71c5d703e746d0938339102007e91aa721ebe15fc81f342bb4d1bc5fc +size 1127540 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png b/images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png index 7c56bc666406bce8a23cc8b4ba8db6a77a07de8e..2ae2f7c0f4c64c03b8b1510f66435916c7d6c260 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01fc30bef8241a5dd2777e968fec1da320d2756146304151c523a9e0bbd62309 -size 1341964 +oid sha256:e95b278b805cc79123e5586350e9a46e0f98278f5773c53033f08613efd1c118 +size 1077416 diff --git a/images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png b/images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png index a21eca3a9a1d631affeaba030c99af63a1b2fc17..55ba90048155f7a9a3e7e36dd97a8a19a8d27fe0 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f69a2241582748b95456a41c1c883496c2ddf99184f28ff29651844f0741c83 -size 1146096 +oid sha256:2409e2d4aaf40d50d2fd08ada20c4ade8a3586c13d3bd5b5c0fc264708f53878 +size 1371258 diff --git 
a/images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png b/images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png index 8e2c068e81286d569b156eda2ba7f846f5772ca4..fa3e7544cd497ac2ce03bd570313ab2c94b29091 100644 --- a/images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png +++ b/images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56e53d9f449fc56f1495b811a9fe7b9e75a3b920b975feb76575063129ed347b -size 1480855 +oid sha256:2c52be879b28eb075f4ba2285f6807002d4b75d33447cb6ce205ceb6cc259356 +size 1499416 diff --git a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png index 5107ae9374e77558b758a8619e2e3d4c5e58bad0..aaa8b1bcad16b48f09e98f3ea67b1d46d776cd22 100644 --- a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png +++ b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9ee7d94bf55c6f3d571e73ab75e468aeaa6dca16a70290e708f5308cb32c3c6 -size 2357973 +oid sha256:4ea0023e116640e6a661f59de9e4104a006c8b07cf32d33be978bb9867dafe4a +size 2183998 diff --git a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png index 1c33d30576677f6e2f26eb21c9f06ac12f18f592..78f6af955306a364ba202feb5481daadb350dedf 100644 --- a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png +++ b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c81119ad578d6d973f7f4def9a8a8f60a8b53d641f11be772922a77a026c193 -size 2311938 +oid sha256:e7041d61ba74319d1e931b83c4e2249574a66029adb7855fca1eb9999ad99205 +size 1341507 diff --git a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png index c224a0df02ac1b595e49ae48fa1b984d35c0d942..dfefb9af135b9352ae72177cc01e04fc0fba6f8a 100644 --- a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png +++ b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4c8f4e0d734286bb119bde9bf6673a2b8655cdcc56a2c913111e984e8c9abed -size 2309935 +oid sha256:83b8b7dbae57ef0c91e28767e9a373b65b1007b905d8ccce1f440d111c1d4940 +size 1018592 diff --git a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png index 62900b3436c42c47b08081abc090a96442db8e70..7a7477d85f3ba6adab4be0a50b906cb17f64d9aa 100644 --- a/images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png +++ b/images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55c52f7651b924cd344843d7d76556f87264b096821e7d16576e1c90cd36b1fd -size 2421487 +oid sha256:466fc4d125061041a95c6d81e6fbc3f19fa086d089f7104514b32e9152bbc231 +size 2119136 diff --git 
a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png index 62477712c9819a9cbdf9ddf3df6d06637d286fe1..8d689524148217dd8ae2b3909b190a0d78a0f94c 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b6832db8e007607fbfbe493a0ebd0be51fede2e80b64946f0b351443e7da5df -size 1235955 +oid sha256:329c38a806fe44c849c8fa2b0661a8923b508e172e819634ce6d7fc5a7368100 +size 1149015 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png index c10533fe36dd0bbb6aa3f1a762c69f9aa2e1f521..12afa461ece83555173cebba1211ef5d3b5c37da 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8e400a91907ffbda2a4fd18f2ed8da520cb44429d6b63314d6e406ace2ac260 -size 221462 +oid sha256:43a4e6093b74572099cb350d4927468605ee12d90bdea6ff336f8c2bc0439209 +size 221387 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png index cd3addd58fecc50f3790de80e33732472e6f14bc..5c5a21facc87f0f090c2ed8bcd57e5a295a1b5aa 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:014838adf1bfff06c6ff46de1251065f0eb987a29e8a63b457bda77514947f5b -size 507440 +oid sha256:ec5eb3f6533816e3fe1ffe0f3d70107e5be5119a92df3aa0a9aef6ca2eb61ef2 +size 509246 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png index 673220a4cc17ed7941dfbcf431571b2609a8a8b1..b6cce239bca8babeadab23a1cd2647cda2f147ab 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8840dd101202f2d1c15223fef0e387f5c172b64c5a06eb75f03d1c31cdd4cbb -size 143088 +oid sha256:962095dbb794e4ff55be54995d38319d2f7d7b2fb44ed21c9f585e392a240c4f +size 226911 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png index 43979c60d94fcf34c1ac82c095dce8f8f3b85e73..62a421c72a555158dbbae686b721581d5c1e7988 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cd8fd84538b8b291f0261f6af8979babf25d75207b2b0f0a685bb3422b77ea6 -size 493726 +oid sha256:3ef3fb6ab81616f888137c80dbff36346dba46cb5f3c606abe42646698445180 +size 501885 diff --git 
a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png index 8424229699954420ed3b529a01f5deebce0930b7..6a076c000535b1ae35ca2e430fb4ffe2507d8970 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:160551170b612fd1e171d3a170a8f3812ec76335c5bcc0212dfc61d20ac22abf -size 306698 +oid sha256:79aaaa55eb600e745b4644ec673406c6bf5d99473be9dc3dcdf763a1e9b2a43a +size 244551 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png index 8692239a1786932f9937ff12397d1060cb6587b5..50a3e69f0d909dcf909e6e13183aef3edea656d8 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90a875bcb977a1e457edd1434e489aa69506b9eef9cffc21a19043485ec36e6f -size 1167452 +oid sha256:73ce2e3b76760fa97d48efa94ea64f3680e808fa50450d965823a8bdb4db2fe6 +size 1083138 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png index c5ce8cfa69fa5cae09b846a4877ea1f3892d7f1f..19445f3fe8be74fc4f598af9b4ccc1e47e6f5084 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:475997d43111528da33e671b789cef107fba31d9333d5c0316934e4baa7f5201 -size 677788 +oid sha256:6ac48d0eb2e2d5476c5a5ddd240e53a9937faae81772f43b362a725f692cda81 +size 782963 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png index b04258c3391af71d8a8323cc8a71b5940269c226..f603c4d84ec6f023f97db83df98104544a30b739 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b41a54a61639bc8f0a9f218b54a6093d7d7787f9d84e4d81f2dbcec91fe8ce7 -size 307926 +oid sha256:c9af4291ceb05e9f9ac8a83273efac5af7de75095b0bb76732f1705a542976f1 +size 397304 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png index ae548bdb58a0fd596a0e1df0eea86dc7fff6f0fd..fca97664bbfd8c95d13d34b2350ce7aef94a6d06 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d67c08ef7c0feaf3ef2669b13d0603ce96fc15d86df6e0038f9e45f43aafd83a -size 575438 +oid sha256:54be0abc47a8bbb8fc2e39fc72c92712542c47cba41c72934fe513ed1cd7cf3e +size 579077 diff --git 
a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png index 78aa1b693065ede9f253924dda9300e438966b25..807ade24d48c12a1118c319959d9f1adb69300ca 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fb321988c8317c09b7d46f7e41c7f1830755d7df3eed0d6d71c16da88a8348f -size 553180 +oid sha256:23f4a996ca2e88f3fc036798648d73bbba205481ec2cf4ca3835a582457362a0 +size 550520 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png index 2a73cf1cb1ecd4f781cc4f6b6462c236ed851121..fa5532a6658e17416843a6536d4e60a3c947073d 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20cc34cb5bafbcce492b1576d4d20d93ade87fea0778ee3f71c2498ab547168e -size 568603 +oid sha256:98f3141bbcca89cf8ce5909be30391fa82e9e70d960f81db85fd0fb7b2365cba +size 1102442 diff --git a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png index 1c2749e9b4b19435fd16ecfb44cdff3e938c06ea..5f838d63967ace7796ab4d0ad8b85e5524ff06a2 100644 --- a/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png +++ b/images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7aad2b4766c4592749e725c118c43f88de7f9ed3b9c478629900c0c0bed5294 -size 797896 +oid sha256:4b64481991e1386304ba7cd7f36b079fd5394f01fc610b8172017f5c75b5b6b1 +size 998475 diff --git a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png index 711a2b07cb04feabd63a48757184b88b889769bd..5a97bbcdec4c710a3e3b4e0d5bde9c3fbd66514c 100644 --- a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png +++ b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3adcd9ef0d362dd360a20fbb3d22b7d33bdbcbeb6a4fbb4e6072a37d5a8c3c61 -size 992700 +oid sha256:43ee16f1696956633268bceb290494069f8eed05bf55ebd6f13a20b21e385f71 +size 1067167 diff --git a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png index ddacf09acbf65cdca05c253bbda81e7b2cbf1546..686b95da781bedb15b599d1183cf711527d77180 100644 --- a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png +++ b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:442fbb65f685aba38c86815b52248ab437b66e904876d20dc69f19cb8743a242 -size 1375964 +oid sha256:02a407ec7adc1f172c6513ab31ae46c3b1493735b99342e826cb605261cf6652 +size 515516 diff --git 
a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png index 09c924e2f745be41f7fc3095861b1bc373a485bb..f579d0867a58dcd2399986da8c99df08723ee4d1 100644 --- a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png +++ b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bca8faf68e9e9350d798f2259a8a5da2a3d2eaa772af284b7c7c264c90fbc2a3 -size 1021931 +oid sha256:5f0f0b74345c44e89efbd40fcaa6f0dd68725e79571ce77db52ec955006e8013 +size 1032464 diff --git a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png index 593dea5ac19e772e29c2245a838fb094df257f8b..fbb1b6f16553acf8bcb48d2e8ac1a7366f732586 100644 --- a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png +++ b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1a194d2af90d2fa59c4d86284892e1088e6f2cb09a8592e1df19ab9b9877d5e -size 1439712 +oid sha256:f87dc0dc1a10cdd61124107fa70c1efb3b0de216c357f00332e9c59d5890cdd8 +size 1249374 diff --git a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png index 5b052d94c0f2dfe25b1fec11a22c88c3706c41db..8e78d630341293b8e4f3881690f0a5970955bca2 100644 --- a/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png +++ b/images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bfbfd60db62cd36e111c1a1984a2cc143cbdf45e4f7dbd443633979bec2d4f2 -size 1424290 +oid sha256:80279330980b1352f31e39795949c7a282ab094e6a0ddd27bb3d0f56362872eb +size 1777298 diff --git a/images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png b/images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png index 9ee55cd89363201dd04295a2196c3c268130b1a4..9755bd8bd47850d33aac5b8811f3192c12ca3a5f 100644 --- a/images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png +++ b/images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2798b620ca0978180fc6bf2830d4f09ad1e46ff32fa2bab32eb75048d9343e1 -size 2304654 +oid sha256:7a2b280797a2a20ae4ced0a9a83b16169a9c6f7396c8e2512ddbeb819ddf8610 +size 2012098 diff --git a/images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png b/images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png index 2fd2dcd69782892efd19d285cfc2e5a0abf8b2bb..ef673d79b5427dfc7b74c076f97d087889e6d9d9 100644 --- a/images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png +++ b/images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:736f19d47d3cd164ef63efdb4b8f33995982acd9c70a8b22c433592b02a6f740 -size 905949 +oid sha256:bf7b6f59d0e74876916bb7e6f936f58adc65a4698ee06bda4e556c9381f4b410 +size 908990 diff --git 
a/images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png b/images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png index 14728f1052a94ac69a4f107dab02714f0dd7e40d..77dc164597ba33c12c712a718c5d0040df19d32a 100644 --- a/images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png +++ b/images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fce9e7deb1796c31f90680cb32be98495fe2b376d217211b9e95eeec31372ea7 -size 1372860 +oid sha256:3abccf92247cbf1bbcf7b758b5daf824931b713a23c9d94ad82acbbfd0340049 +size 1348232 diff --git a/images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png b/images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png index f6cdcf06939e49611b8ce3ffc6b089e5d03033d4..909bc713de15fc47f8549f80632fb4c5a6942d31 100644 --- a/images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png +++ b/images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12a695c38c08a0b6288ee915a575fbc666a97c37424c9c2d771449a30d89082d -size 2902474 +oid sha256:462d12c8894f4be5223434b1d832a3cb7e19ddc20ba8679449402a3e0f8b79b7 +size 2081152 diff --git a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png index ded48cc6d903df652dbe52b486fbaa5b91bfeb16..b51f0eb0691ccc416d70ede3719ace528af3418a 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5e842be2f933d4eed64779f540a0a14df0d6d4e0965b12e569a58a784eeb55e -size 1353418 +oid sha256:52c395bd207c06333b4a1476b23b76b9f933dc9fc78618978cbf6c04e3a3bd98 +size 1590736 diff --git a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png index 3dcac2cb35cfc0b69dc52af7c23b472a3f444436..fe2455174634ed49f4abcf430b3527f771edc3b9 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be8a321ad9b1c94e7fc008c1c7f368144c53c95d91794fae2bb6e6bb5ffda378 -size 1357864 +oid sha256:d68d02a4cb72743e854901d80b57b860c43541cd76d17706c4b22f5432973afd +size 1600301 diff --git a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png index 01036675cc31d8be52e5da6bfa883f5527af0c67..78addbbd0699ad45bb2b00600fb28cd5764f4218 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80a92d489035dffcbf7286cd7359e6b0eda08c5b39b920ebbb9c892e796c1d70 -size 1355220 +oid sha256:76cdbfc0b93eebec9c22e1dafbe0e09bbab9935acb850f3c27d0536025b242b9 +size 3037843 diff --git 
a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png index 352501e402d89e5b82302d12b7ed7dd74c5c80c0..8522e1e6aefadc6245a4e634a90572b4912057bc 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26ae443a0a09423e858502cbd4d2d6f0c832b92c8e4b2707baaa90093a00c3e3 -size 1799926 +oid sha256:c8ee7c8cba5628b61ac3e888fbb24e3fc47259dc06a54bf15068f1c06e8217fe +size 3461025 diff --git a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png index f96dea9b2000fa118cd9cab2fcdf81584ff319fb..b7315f4ab53fa082a27794a9b34fb05e74d13ff8 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:470536f7b14fe40fd0b52e31cb23aa52973ade1cd3445eee6f34d096bac120f1 -size 2082151 +oid sha256:8bbb8ef7deb4cf9df2597af30d703b6eb71709c71a46fe7055106604e171f735 +size 2306224 diff --git a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png index 5e2d1ce51bf423597e1515ef503e811f6832da6e..4d4158e5695c52ce387ed97151a25e346e94472c 100644 --- a/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png +++ b/images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54e17c2062f5fdd024088451291d1f2241e3377af5ce96f55c3fcd88274ffc2d -size 1777373 +oid sha256:8c8bbc13b287df0472298faaa1a56d50544563eca555ae9a8f87c52a0b9e2f77 +size 2027410 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png index 864e956c440c87b76173c2eeed74beb96b98c4ca..6decfb29fcbdd3c5af7f401cf453acb4608e327f 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e27c80ce3d7bccde2e4b8848eaf1868a5c319b9cd29b2e0033ffbd184669f83f -size 1252618 +oid sha256:7d1058a67eb5dc94b2e4a4b542db4fd5dc895b10b3704b86cca006a415cfc871 +size 1188457 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png index 7f3e348c99678c7ac9b4e6db6968a98b919a7779..f5102178d2a4f4d5fda9c6f4cf1c61515da8a6ef 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adbeeac46edc57dae55e3f2ba95ed9ea36bfe95d5dfaeb4146bc394325ba67e5 -size 1237136 +oid sha256:92390348b20f867af74f3447a359aa635892558681575b2732d447dfcf1be6ef +size 1844042 diff --git 
a/images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png index 4393cc0d0a19493837b19aa52c62a260f1394de6..ffd1b2659062615446011417f1db215e18ba92ab 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd57dbf564e06a9d2226f7550f5cfdf71acee87dc927b29dd455a9d6bfdf8edc -size 3741997 +oid sha256:e6bda9ce0ed8aa2927256bede1db4f8f4db45e0585427d0f24507f6c154768e1 +size 2039084 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png index 19a007e4e4fc586e84942d33b030e11c864de333..f411ecc78dc7ead2f6f7ea65e21938f2c5b99de6 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4feac7202165ace7219ff17a6e4ad004020e84604cb9aa768f797abb327ea5b1 -size 1190463 +oid sha256:2b4409bb8072ab0cb103d213299ca9e66138f49ec85d25a2ac5675dab6e18265 +size 1358767 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png index 57e20c43efc3cc3c922e8ac98cdba81f89460f7b..be7378d5c8ca34ad776d347edf6175d1079eab31 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcb84e27ca40790b34b60bd66a400799fb4b61a75f9b94e5e7f8de06369a834b -size 1255195 +oid sha256:2cd1d541a030df41015673094854bc958c7ce104287495c4a254507c41d9a647 +size 1807491 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png index ad907debf0b2ef200f1412dade1447fb2bf2be4d..c3889d677189d25b27a343b858ee7037d30b6700 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e8a285ae2e946cc0a2db4f6d7366c5aaba7ccfbcc08415d9512b3f45c61114d -size 977568 +oid sha256:a2e7c97524d23ff2eacb4c414034e5643dc318d0a3c941aef312f2027e68fdef +size 904907 diff --git a/images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png b/images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png index 5969642891497bf42a54ca2460bef008931863b4..7cf550b0408fdcbb4cb9536d742abb2afc043833 100644 --- a/images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png +++ b/images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b99f2e1a8e04cbd172e715eefc9026ddb8cc24732942902d78e0301507fcc970 -size 1190926 +oid sha256:64568c31c2cb97c7e37e9b60258c13a22d3513f8f1b4544f19dad3bdeea36680 +size 1134876 diff --git 
a/images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png index ce37e89aea7058ffa06a74b920f6206b7ed7de95..4b3ff0713361f58026c44fca70c7249fcbafe7dd 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9a7b4ca85971f498bf9398a9cacf2f4e9e8608829c27af8d0572fce37ad3592 -size 1151940 +oid sha256:c0e397a0688914f2e573327aefc429491129c6270c2eda89cccf264a64861bc3 +size 778262 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png index a9c573a7a8c3da9931043139ccc392de7d1829ed..000c11c6e1b85982e5d538275b3db943d4722b7c 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d85ff91efe19ce806566ed204e38a1269718f3b63be86372165f798bfd4052b9 -size 1071263 +oid sha256:0d9c3e6f2cb1740beabc1d994cee00c1dcd3876b270d0e7ffe319e954c71cafb +size 1077563 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png index b177dfc2de8cb0656240930505fbccac2334bef1..827fbb5afc5f061c3504cb3a293651b43d8e57ab 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:195e15e249f0f4737739f0b27f4394cb2a5baa641179d35760889a8d67fdf63a -size 983782 +oid sha256:95b91f71c29040362f232c09b28996258cd9e448bd692f61fd819389d8dd08d6 +size 1389140 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png index a9c573a7a8c3da9931043139ccc392de7d1829ed..8f24c589d1031113d99a808c589134bd82b2e12c 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d85ff91efe19ce806566ed204e38a1269718f3b63be86372165f798bfd4052b9 -size 1071263 +oid sha256:9ba624e419ac27cc860154003a8d970bf5f2019859fa434f41303b651125a7c8 +size 1432039 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png index 9461145c83544abefa9d1a96b8fea708b60ec89e..ca70237ed0402c4c7847c8bb00cdb1de2e753fad 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:deb7a052f1ac63a0a65f72ab0518f6a11b26e492acce5d94635070183c4003d4 -size 1114865 +oid sha256:a34aa81bd16f0d29ab714d984cd9e2af26af04f8a58f5cf24f26852528d97a22 +size 889188 diff --git 
a/images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png index cb5c0b15d90e0dc6a135fabfe9a82c1bf6ed6ed4..0b9930313167bbe691c9a0add1cc6714245c00c4 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d62efc3e943b4868cec465483f3c4135910382268e2bc11d6b84b2f4f808c6d -size 1462680 +oid sha256:f13aab5179cba71e6db2c26022a85a28189402c43d0a22e536a5f3e1a6b68c64 +size 830045 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png index a82460f336f42ee878ad55b0b975323d9727c0ec..d09bf065a107c175cfb93753376f0d5857a7392d 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ebb6a8ec764deeae41d0f6028b72674d9f117566b6270bd311d495aa1f096a7 -size 841185 +oid sha256:7d0f0529693af047349634ebf72e1ec463d6f1ded821e93ca01021ebf461aadd +size 836580 diff --git a/images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png b/images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png index a9c573a7a8c3da9931043139ccc392de7d1829ed..6052c3c852ceddcd08039051dd9e5fa2120af07f 100644 --- a/images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png +++ b/images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d85ff91efe19ce806566ed204e38a1269718f3b63be86372165f798bfd4052b9 -size 1071263 +oid sha256:258b002eb92c10c04d9f2393872972b5c6face2dc726e28dd87d6a1962643457 +size 1181441 diff --git a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png index 8a0515235b05aa1a3f2c737b72d0556e301b16ac..b57e24d59540ab496a0a476639ed30427261bb96 100644 --- a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png +++ b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9b4b8e44a89c1966b2d172e94d6b674c594bb75bfee30549373717fe078adc6 -size 881646 +oid sha256:4759f0178a409a84135242461b098a3600b6835539ec0b919e8867b583326c99 +size 482272 diff --git a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png index 13651cb3afe4621ed66b9ebd1cc622975a166f2c..2ee1cfcb7241f9f9b73026b0fe1df2d48e697fda 100644 --- a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png +++ b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e359dbf513fddb790d510cab63b7502a183f1c626a819400595870b936f694ee -size 867489 +oid sha256:8582a56a4ed49cc1e2d555a5fdc30bd839be1433266c6348274ca3d0b5841fe7 +size 583797 diff --git 
a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png index d89cbedac59df1e0bc464df9d0e55a74144c29d3..a68ca04e9b5ef0622a0b91e054f3388de4917247 100644 --- a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png +++ b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9addec78b25876009c9fce224f6303543450116642ce5f2eec47aed7538aa5e -size 1130828 +oid sha256:9a69970e633a5511a64f0adee4cb7e07ab974711651857fad14cd7f3ba8ad891 +size 1136995 diff --git a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png index 35c921310a7a8a23f80fcb9324615dd9fe5082c7..777acb94ac308664e6d6f44ef2e00da375600ac5 100644 --- a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png +++ b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7417dce5fd02870d2e236c155a7dec5ac21a8b902f36e3602084cae856814e4c -size 2409843 +oid sha256:1b8f3fd25699f7ad90262a9caff05853d6200749278591bcb6ce2c199fb02478 +size 2463938 diff --git a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png index fe62a0be8ea7fd8f9b0a9c983272c9820f6c1d1d..12699488973e66445ad6e75859a3e7480c22a137 100644 --- a/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png +++ b/images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af4ae3a8d4326a00dbed31fbfcc07561c6eeb8f66c07daff93dbfcca914deea0 -size 1081426 +oid sha256:a8ff73ff2687c1a6ff5d579d1b7d528878032f7462e1055133e185223b3e64c7 +size 1192329 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png index a02d7a89987563abb1821481847cccfcc52542b4..65305be95872642d587a7f1412eff2fd521d674c 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d108149add87a1b7651bf1e135c729d7b3a93d0455c872e72ff35d486562a0a7 -size 1106901 +oid sha256:6a92aa9f18b424373ddff2c737a076d7b72206d26f49b9ba4e10ce49eaefcb53 +size 1105943 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png index 6a6166d0ec32a241898ada7411313ce403214d10..631b4198561ded4ab5463ca430b7ce9df759176e 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5aa33bba93659cd3a9a6eb7eddcf88114193ff8a4846fecdce4184886c4915bc -size 1178549 +oid sha256:f03dbf0786a5762c36d923132b5856b4bb0ccadc36121fb5c148ec02e9a8567e +size 1175858 diff --git 
a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png index ad659cdfa03b7cd223ba9953dfe4bd9a4e7879b1..c5f96ca2c5a24382351a296d42f163b70c0caf51 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59ab8ef21c25812c7e5876f2a19b4d76e575d642c2fd554013573992d835ab4d -size 1477680 +oid sha256:a1b13e2145e18baa2c45a8d46773a32006dd1eaf964238b200f9b2e4b31bcab2 +size 1433550 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png index 637e7cbb12c100012e133a42ebc5327a68df426c..52548db450597a6eeffd1884069c12ce1151a332 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6769ed4affaeb1fe4e3aff0eb33cfbea2205d333db44cb7089f56afa1d9b06c -size 1080718 +oid sha256:1c5f1495e31fba6488ae22210a9347ed00be70bbd049c3e905dcd64195e607a2 +size 899287 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png index d26276d35d0103dedf3113e9497481c53fc3d5f5..42b27e3534625a2d876e279c4a7c2c02ddbed243 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da7f1df8caeba5488286a4c97dcff1d70d7bf07d140a473ea904bcd593343c42 -size 1215789 +oid sha256:0f9882598f0b002122ea9cb211ddc3fe184fa3e670e1115f60ac7e4242faddd3 +size 1388040 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png index 8ecbcbb41e4d6fcf8be8f7973c3e39623d3bbdb8..bce79773fd3ce845e785a361f950932c72c0a420 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6f6b31fa068c67ca32f28029cf4707c1964bf7e444cb88195b1d6d4a12cdcb5 -size 1109633 +oid sha256:e96e07927277293c31c7c51b0ab040465ce3de962c151bc565a79da06eafc9e3 +size 1174254 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png index 86bdbb2d9b4e3f18bd519c2e54abc5e420738930..5b7b99a03a9692d606b47aff09163289de5795be 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10f7fecea02b9363c560cb871c9b563b3fc3f3c025be4836e5442a22befd53c3 -size 1481226 +oid sha256:9d60c3212144a067a02926fb5ceddfb9d57e9509f4c449f04cdcb4974bba2627 +size 952248 diff --git 
a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png index c56aecf8a5ee014aa66bbaded12e1eca296f84bf..9196db77d96c8ca7b3f6d12e2e9ae76b1abde34c 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f061595d5a23577f074778ad479b75d12aaf3e34a8e61cd42be83e158f37c7de -size 1105997 +oid sha256:7c344cbcf004a1d15838887204f2e12fc5dec909bf870ccfa0348ae7b9f2ea2f +size 1173996 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png index e868a0188d988bd611ed0882e7d28cc9b04a87a8..25251e908c7390679c79dc9413cc8bf487a2ac21 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:553d15a2e37c67d11558976643d7cbbde30c8ea4141dbcf6d7baf31f009869ae -size 1288493 +oid sha256:aab8c887ae64a7d53b9a23b80291e68ecda57f70d8cb83144446b4b2bee7ef93 +size 1507717 diff --git a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png index 65ddf603b56089ea2c689e0940d6c3ff2b7a13c4..0abee257e02ff634d8d21c1d72135e54e09d1057 100644 --- a/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png +++ b/images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20fd23041e08504032e546f868e37a114c2ee5fb52fdbc90067ff73fa2c59a44 -size 987880 +oid sha256:e9281752228b9b0eec386a0ab867b315f524e2f84d8b574955e673de1c681784 +size 1102694 diff --git a/images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png b/images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png index 922854c393a482866f14edad231d6e50a06f8605..3f25dfe4fb916c33474bf44e218eda4ddd2eb4dc 100644 --- a/images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png +++ b/images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f32b79ef38ad60fc3c4321d4b6ddf3cedc4b4d5015d65964adfc22235c91ffe4 -size 880407 +oid sha256:bf61bf0855e8394e308d7930494227d5ba815fbc81743281ae559ab646df18e9 +size 1170981 diff --git a/images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png b/images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png index 54f4faa083f2b1dcf6125a56f9848a8d2d22d718..50abbd15bd009e2639d23a17c1e023a61307d00e 100644 --- a/images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png +++ b/images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db1c8b37de4160aeb9b4bb426d8d37d3f4373139ed9a61309c0f2355403e1df1 -size 2326746 +oid sha256:c4ec53d002c8b152375701be4ceeb4e882700e41de65e98587782aa0e999f3d9 +size 1148484 diff --git 
a/images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png b/images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png index c7c22209f102392e472229b68497dda54bf1eb8f..690d871a65c21763dc64f362a15778d86e6a9505 100644 --- a/images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png +++ b/images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aea62cceaf576a2b7aa42899c2ec7904f15dbc3bf7e92ef249dfade2ff117f25 -size 1155214 +oid sha256:0faff3c9c8216694ae8d764155b995938be17167dd2fb61a9a8eb3746cd1b9d1 +size 1454848 diff --git a/images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png b/images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png index 8a4072ea07e0fd1776c69cb94d0f55764dbfce9e..c3a2088d3c007d0e9ed3e4fe010710b714098238 100644 --- a/images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png +++ b/images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bef6cd6c5f4a0550b357c76638a8221df3a210704a214743ea3a82d59af5ac0 -size 2868639 +oid sha256:d315463039d324eb139ab21eba6e1ed6a5313231ebb49eec8be34914a8495552 +size 1904638 diff --git a/images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png b/images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png index fa5dfdc9eebc822ffd28182e001986ee86214ff3..78b0da34976580dbf1947cb1a057c59b38d2f6cd 100644 --- a/images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png +++ b/images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53c0a3f9277023d515e14440e4a8b7be68687979880ccbf4adb00cc139a83dd0 -size 2325091 +oid sha256:264166e1144a20c4a7ebc3089cd491e6d7c7f036199daf55605a2ba18c5df735 +size 1406443 diff --git a/images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png b/images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png index f15646efc3dc6f00d7b08bc6217b44a273b50fca..8650f9f4a5a5900c407927b3171d7e856742ff1a 100644 --- a/images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png +++ b/images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5f79597fdde3097c7f7bdb3a406674cafdd221f7d9ceb5fd96a890a725aede0 -size 2167780 +oid sha256:a0ae0c57ae6d654c9c3b836e4ab00ac04f21e8ca4719a0c759bd55c6cdf04eea +size 874658 diff --git a/images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png b/images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png index 11ab509a4bbcbcb8e3eb0a79c28314f093c2cc7e..18d958cce3c39659c9c5b9511e0b4dcf03606d8c 100644 --- a/images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png +++ b/images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88a4fa32fc7bfeee753a48fbac8183d1b1427de0cc60d187c619ded68daec82e -size 407417 +oid sha256:a8ee5fc63708d6e9e1a3ac2fe42df893d89022450382bd66c98b2be91b6f546d +size 408365 diff --git 
a/images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png b/images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png index f121ab6dbcf2049802f0117a5d6c6bc95e956bb7..53e8773bae4ffc2c4c8bf2f2b7cb9a316855171d 100644 --- a/images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png +++ b/images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86f25d496cc07b1997595b603d8313c61cd2f8b0895a534326e705ac2df8fe04 -size 2020749 +oid sha256:a69af04982a04f4f6b82d0413d11b5e9d6e75068480c54143e1853372885da39 +size 1097304 diff --git a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png index 416aba0450fd519d5452a4d5e8674da63222598d..b7d6288a133675b448642a6b0caccaed2dbf71d4 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0fdac5057d10620b4ec516be8e431f7463350386eb42d98bf75813366df439e -size 1505212 +oid sha256:94ac5bc34d6763b8d49956ef43d934a5e513992b3161e818af7f57e5e1b29061 +size 1660143 diff --git a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png index 6aee2f7fb7bbae4c06919893403adea0e3ff31c0..611beadd775109c987c0082b05221d3a5977ae17 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26b2b5837f2df82f79e9ad86b2a9c8df2423d819beab11bc980a63272f2c4f4a -size 1056025 +oid sha256:6b2775bd199e26fb083687ee3a83cc7966937c36d0fbe769b7dcfdffc47e0738 +size 1064084 diff --git a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png index f95537da1a704024c26b92d8a6b9bdcdc348ce81..ae02abb8c45e27de02ae24ff6920340beac9c43d 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bf87fd4ae84117b8f099dffb4c37c1f0af34c0b30c30d6e3df9e2d9dea1ac46 -size 770563 +oid sha256:48d92e90488309ab214155518b0e7814035746bdd2f5df27d1875f41b4863b15 +size 547946 diff --git a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png index af12f1e1c3139ff020082828484aa1489e4cafb2..4d91f8ae948c24f9444e3510bbb7649385bf59c2 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f13e901b2f2b2a9390a5f2b0b532c23c1cbb9deccddbbd38d10e343266145ce -size 774716 +oid sha256:78df9768e95bf45e2e5a1be03adea5ca3eae577ca4dfaf1f26edf6d0b64d2d64 +size 837694 diff --git 
a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png index 139b6f46106f347d8575b57a90c771d04c5631ed..dae4fafeabb605480ad66d4e990797832f484a15 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6052fec9196e731f90ffe0da0c4607d140034a218a537070c42126cbd522fdf0 -size 699699 +oid sha256:8a53d891df534cfd7cc580f64cb85a4417cb2dabc0b7b182c3c8adb9990e821a +size 817572 diff --git a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png index fed0fee9158af99a7fbdab1f39ef57251342432c..929c728b74b4e575d59c0e0e9553b4553de4b55c 100644 --- a/images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png +++ b/images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4a9ddf02e94aba2bbcc750aaac9197ad78feb1022da53d2bcb9bb93f17c5619 -size 757918 +oid sha256:1905d0535f5be48cfcf5a0041cba4e15f60c5146650387f0330e265740a9a803 +size 756742 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png index 42cdc9a5d68b0aaaf4509c97bdf3e02d5faf5af0..179e65dbb3711dab097e9482ebdacd5923fb39d3 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9349a55edff2e8bc885227aacf67c3d70838a44fb6b2c78f59a1f2c1ecfcf8ef -size 746745 +oid sha256:86d4129e20dc1a0c58391749cb7f9afa86525c598f297ee295985eef5f9bbd0b +size 897286 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png index 999f6c4ff76972ff56f7f1d35735c246d7c1871f..1118a466ccd599e4e9ba8fb91a7eeb91bee972f2 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5648f9fd54ed8126163fbb8cc60e34fb838a0fcba4e539913a6c33ad6cce7ef8 -size 805527 +oid sha256:52da83bb85a8153dee2cd50f77e74aaf2a40ce36a0832d9f8c64da3941f862fe +size 730681 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png index 999f6c4ff76972ff56f7f1d35735c246d7c1871f..8e0f7b47d36e031417dcbaa2b2fb8b7c83412106 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5648f9fd54ed8126163fbb8cc60e34fb838a0fcba4e539913a6c33ad6cce7ef8 -size 805527 +oid sha256:4e0eb7dfc0bfac5f0607bb286a8f30d77e46824be01bc68d8d15c3adad19feb7 +size 1109987 diff --git 
a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png index 2f81d8bb06cb77624921ce1c9e565e45c481eeac..84202bdd9e79f6d00f78407143834486cf07ce34 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d34d7dc6924efa6f259389c8231577b0c884b3f2f4258a9e9bae90e421681891 -size 778668 +oid sha256:45051546e4b32366d749d63d637cc385c5c42e8bf5703b3fd2564823aeef5e3e +size 426566 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png index 42cdc9a5d68b0aaaf4509c97bdf3e02d5faf5af0..e27f8a20a86fc00104d13534a97103f5e4113f4d 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9349a55edff2e8bc885227aacf67c3d70838a44fb6b2c78f59a1f2c1ecfcf8ef -size 746745 +oid sha256:c810b760631235b4121fa330899a0f51adf2ddffa917cf2cf79d005134c86ebf +size 843365 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png index 2cf5da9f7ef00dba72c826d3e65f525b70374849..f49121e7f69b0db1aca7b6439b19a06646f954ac 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:760fb9ff4e539648a896755982362ccdd5e5098e36e1827d673ee7b241230e90 -size 773670 +oid sha256:bae90c60ae8357eabe27f05f527aad35468f5142411a1e1eebd934a1020a0f83 +size 933514 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png index 9aa808b4255a2fa8b7709ff1b4898b2f80d34481..8065ddc7b9eae16516613b88a857782458b7ee5b 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8538981eb8c0bfed0e6976d8a27c0b1d6d8d48ae8c7cab2ae2e12c1592433178 -size 643919 +oid sha256:40f02eae9448d5d0f2c692a6a2cc16530f517eb59d25c0dab629fcad81772a26 +size 999138 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png index 42c00a74246ddafa9719d22d334b9aa65e7a4d92..338f2f7ca1cd991389bb1ef0e1430327db2a8440 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e8f0df5df9fb6f4393f941c6e33079c744d1e5407cefb113064d8fa2ac17968 -size 961542 +oid sha256:7c9cc4f7f2060857e63ecd3c3f1681f5914756dc42e3c5bb2a650d655326427d +size 1356597 diff --git 
a/images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png index 24629439027ab7e9de2fb44bced7bd1c71335ecf..09fb3e57a5f2732d23839f2206408877c32531ef 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8d4fcf347a65ac247805053e650c096e9595f8152dc592ed3d7fc4bd0cb51c4 -size 805650 +oid sha256:b584fb5be13b5f143d7c21586fbb07018921ed387533c0cc5a31ddb2f45d0272 +size 792383 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png index 051c1b0475c27d4eca6ce9f873f25fc2aad2031d..c80ae6f2ef1a2d6cfc58e919857182c6cf473dc3 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc634cac23ab34178e35c4df7b87614ee0ea95d32883932b7f4bba6ba09d5952 -size 597492 +oid sha256:e98cc973b8625c17929de3400ff2e5f432cebc95341123682229d49d590f5ebe +size 548795 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png index 999f6c4ff76972ff56f7f1d35735c246d7c1871f..ab84d7caacef3732ce1c8888197a3e6db1d83fed 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5648f9fd54ed8126163fbb8cc60e34fb838a0fcba4e539913a6c33ad6cce7ef8 -size 805527 +oid sha256:820f5df7bdc715ef6d792934b1a0fe573566457a91c326ff59d3de3add064792 +size 764663 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png index 56b77cd5451182b470e8316a6d7a26d72971872d..19014f3c7d7ecf96cde23ba4133dd7b69cd41664 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01a18c100e829b623e5e1ac39dcc481ad051095aedaec5c0cbf555baad80aa7b -size 787323 +oid sha256:accd11c35b349d1bdcccb8d6b8b602b41ade2f25d2bbc26ab11ec942ff024ed4 +size 1071462 diff --git a/images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png b/images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png index 999f6c4ff76972ff56f7f1d35735c246d7c1871f..7662bb000293dbe64fb98739291c2d5a7a05abbb 100644 --- a/images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png +++ b/images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5648f9fd54ed8126163fbb8cc60e34fb838a0fcba4e539913a6c33ad6cce7ef8 -size 805527 +oid sha256:f0a3bb9afd6107c4b8940fc4ccd7da1679026be553be37601fb10d67c6ab7767 +size 442219 diff --git 
a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png index 98508dee86b167cf95066a43b43192dcb7524eff..fc01032f9886e096854f0ea4441748e31ecf879c 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19a6d6a3ee53b3f9b43cfc99f0dc00775dddab500dc424550b9cbd8f0e1ed5e3 -size 652465 +oid sha256:8dda8a5333ff9ba22683549aced8b422d65229aa7c5a5c17114ba80c6f5bbeaf +size 519659 diff --git a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png index b2e6ca4dcd8a74ee805654da74cd883a44cb6024..404bc9bfd41949837ccc984654f35a3169ac0142 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:505ee5c93185ee4b0acb65f64f4f261b3dbf97e1a2ecbb0dfe9aa6ee7fe9690a -size 760959 +oid sha256:7e98578602903364b2adc9bf5be10d1df2841425a7633610ea768023bac8408f +size 350736 diff --git a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png index 026833d2b48bce107f1e5e1f41050241dbed37bf..9d041d709a9ff579066bbc121c21f25d504cc22f 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f184127574c4fbce9ea16504b1d604779f216d8cf1a25ec44fd3b1aa1a0bd22 -size 442154 +oid sha256:05cab5827c6a88d5898ac08918c74915df2410ec006d959fde9d97d44e2145ba +size 522111 diff --git a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png index f248e9d84dd776c2457aa6681bc4016dc7d0daa8..00628b2d6dde590e7458a756fba3a0f5637266b0 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f758a930b9143ece56ea605d86c3f0a54c5b35404abba196e1d092a54d2f4243 -size 550048 +oid sha256:9debd066f4af44e3051bd38f79d4a023a32f26d57d603556bd669f23967eb88a +size 550199 diff --git a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png index 8034027dcaca57971e60065ddc3df5ecedc144f1..9ec524a61e500dc2d99dfe88366f7b9c06cc604e 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45fa13e0fafd7997781af8732602ef90c36f43b0e16b40182f184fe82956218b -size 761286 +oid sha256:951dc2bb2b66b754fcd88fdd016ad4832b9a0de884332c0257befdecb9522e90 +size 525647 diff --git 
a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png index 99fe7bae30295ab80cc36c66988873cf91d2339e..662cb87c2a5b60bb69831b5a7610bd810c325c47 100644 --- a/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png +++ b/images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29c3dd310191827fb39ab2ea0302397ddc008a416eda3ae14d8c1f3845880dd9 -size 582162 +oid sha256:24a080d59499c5b428a155cd4e4a66b84add1841a5268c4bee9ce50a85a5b8e9 +size 730788 diff --git a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png index 4e92e82ca1e318f9e4e00f37978f667fa71dbae3..cd6df1ba6aca5d0e4b198c320cecd594422766f5 100644 --- a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png +++ b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4c482ada05cb4bff1cb48c941cf3664a0827bce315876ec2927ea50668a09a0 -size 1078143 +oid sha256:44b5d8d3991bf66cf1f78298f0dacf1fbefe4784152edec90b533bc69d3146c9 +size 803539 diff --git a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png index e536e8afdf751df9f43c0ff2bf9c18f1eac2ce92..a46ebcf3b8d810bb663c35aef4069bb580b68bbe 100644 --- a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png +++ b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34afc9f4f7943b1ca4901a3e3c3b35d945aa53061bb607de06c8b5996b7722d1 -size 139338 +oid sha256:ce3601bb665e2ed78c99433583aba05370eaeb193442c44d6a0b0f1d2c199794 +size 93024 diff --git a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png index 3989b263cfabd991d5010cecba5d99b193d9924f..360e06d7f41e811f8077448b4575b30219dbf121 100644 --- a/images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png +++ b/images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44fa985daaec7759d1f52c94326c387a6b718b6a87b06525bfb4d569112675f8 -size 876023 +oid sha256:c99e8ee258a72a8a4ce05e1992fc3f9f3140e33891d9415bede72c7bde90b033 +size 1276679 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png index 36f9197df5bc03958238cef39a4f38386144c819..ef1fc9d58d578aa61c4da4183f51a88de23f6c29 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ed32c127a4893eed83ddc1e9de71b1fa8cedd205e49ebd15ae31ec1ca74b08d -size 1108821 +oid sha256:1ed5798c8465fb2f3f75d33a63763424c171a5a8542d319cefe77eb5e5c45c4c +size 1298775 diff --git 
a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png index 2b771795cd2dc4ff65491c5369ab4c2e3a5076ed..db964bd665318c00250d7020aa938e9e6559c14f 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76649459f302ebdcdff33f48eadc2e5058c283681922fe5ca451bf589934fbb2 -size 1755116 +oid sha256:0b106e88721c1fe948ffecabcefa8cbf82b3e7a60f53266bf72156c46a1c65d1 +size 1333152 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png index 35aa9b1fa88819732b61c5ab4d2d04a22fa78e3b..085b93545be56237e7586e105f8a68edbf0463d6 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:caa84b096118dfcca9768aace4e7b4f95b81aa15aebdd0e280af76e484cc3f0c -size 630132 +oid sha256:96db17405ee27bcf40db5760d86da4a751c13fe7f8ed8f42cd616431507424da +size 952737 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png index 7787ecb51ee368e7598fc1406eff9a6a2f4f8d10..59cdd5bb293f159f222622657e893577fcd81143 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5987e6137a4bf0fb643af42fc11dd568316ef0af3a70690bde2f9b89244c84a -size 1323936 +oid sha256:c6e658a301fe6dc9231b0ace6bbe8f7f8c98898c8e6201929ba19dd6ae82f870 +size 1844442 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png index 56946284e0245a97f323e737bc1f45325661d305..6754534c5f457d0c2aaf4351e245d92e2f2495b2 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddbe7d008e2119dfa905608c00df4ed6ba3f00de11c58c5725acbaefd877d2de -size 629674 +oid sha256:3eed2621ffefe047e2b6338993cce7933dc8089ae2ba58530f174395814c6dab +size 835377 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png index 510fae10160bb01918b0fb299e50c27b6067075b..9d328cc76271e7d8ee38ffc5ac4e5aee294082cd 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:890dc085743cda8c98062179263b9a6acf8c1a759b4bb44eed1b80a19cc43af1 -size 1609908 +oid sha256:24720b467511638691e07c78bf505c1efe056d8da16fdb4bae38e3163e150885 +size 891005 diff --git 
a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png index 018a523dd9b4648a971970f27b1b09221dc302d6..4a2ef2f834729cc55915df94fe2804a224152d90 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f8b0e42737b8b59c5b6d3ce8aa29fccd727254b760c8da64f1509a7e2be2470 -size 1069637 +oid sha256:b0ebfb35e2c23ad29217bc2f78f59826a7f4f23b134e5b2e18539af7b8712ffe +size 1243478 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png index 2bd96356eea661165ad4b73059354fcac334c56c..829d0bc42697f3d06630380f6ea54dfdd9af7916 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21a9a684279ced741189ee57c09884de58ff36300ee59436a546e890bb81491f -size 1184594 +oid sha256:7d1b60237365237707626ae50d3324f7b20fa9e6319aa35fadfd8461b4f6cc6d +size 721335 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png index d82c8b91510f1d359b562598c39b080269d30e29..a9baf069f1ccfddbe0a8fdfde9c3001048b867a9 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d531bdd683b7259fe224cc2528b0e12961a70e66decc29037347d3e004f0d83 -size 603948 +oid sha256:7d7f41f21948c66a75c67f210a2060de8d6dcb3253412553acf38a2854f58bd1 +size 1276579 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png index 15eca24bcc98ef9af9ee6ed79fa067048a440b0f..70eadfdef135d8f0beaf58d48ad7a349a79b3e0a 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02b5301d180278f286c5083030306678e9e697a9df376939e7f066bfe408d168 -size 1109711 +oid sha256:762084501a169cefc7c3bb2d65dc72f0fa4650b4ca0a7527c8e0d8f2e673167a +size 1253839 diff --git a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png index 53cae7d083a51514192d170cb86e6a682e72454b..c8deb164a717cf43ec56e7794fcfaf994b06486f 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d70368f87c23c61d80f5db3ee0e8f5fdfa46ee68eb7e547027b191920494874 -size 604896 +oid sha256:51dd3d73b1d14db61bba4c7a79641af6f79fba1bcfb3dbfaf655c40f09661caa +size 1223198 diff --git 
a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png index dd7e8b0097032d2f2becff3b8d03cc2ecedb50be..7cde9dcc6643e81f947851214d78262c418fc803 100644 --- a/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png +++ b/images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90da485bf983c681a6948b2c03482dd4e446c9708b95d070eceaeeaba14ed465 -size 1195124 +oid sha256:6235fff4bb9e7a37cb5f436d41173fd8643e1eeb9ba6768c1988cba12c557953 +size 1326239 diff --git a/images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png b/images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png index ddcaf30b9cc4ee7aeec6febfc2b61fc5b9e197d3..205e8a78fa0b5ee6460bad8721dcbed92da655ed 100644 --- a/images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png +++ b/images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:654d24333f12756476d7bdfe428e2078334d590349e1bc3ca5fcb500b1b1063e -size 706617 +oid sha256:edafcc70e4f083efb729b3596916a64b99be1757448ba195ad1c64797792fd35 +size 821369 diff --git a/images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png b/images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png index 2dae5d3cf858a6ca11d3efe7956a120cd7afecdd..1624d683c14bfb588b3c0ea4899f763ced8485fd 100644 --- a/images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png +++ b/images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9811edfb2129d9eaae3537d431623949052ba9678f56442ec8bea2ecd9d4acd -size 362556 +oid sha256:1bc87fe1ac596b2194c9c5b2a073e19baf5b46c15a4eb6b57e3168dd650c1cd3 +size 1589581 diff --git a/images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png b/images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png index 39eddecf9528ecc527393fff8e2b3b6e7ba69266..579062add9c1ea26da262d2a44dc8af33dbfa267 100644 --- a/images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png +++ b/images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab3139016b6bc8b98704d1b44ab3863962068b13424d4beafb4f8f8c9fcd0f32 -size 652898 +oid sha256:5ddcf03dc60616f32bbb2b8bfae6c3b01b145d915ff49ac39410c8a2e5548f34 +size 685075 diff --git a/images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png b/images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png index 2cae22d02597e7d2762d08d7316b4e2ced564dd1..6de8fa6092c4bd2d4f072a8d7a3599df62f517e5 100644 --- a/images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png +++ b/images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b00c12d414d1ed932cd40c3838c583508d2b07291a59d9885bac8892b5c11b7 -size 689487 +oid sha256:7da8e2fce7daec736b46b591f6b32f30c71efe5bda5c73b6ff53c93f6e5b8d0c +size 1121001 diff --git 
a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png index fcce140889c6b45946666cd7d86a5bec6b5bb6a0..17617b5357ad43666cb9781ddca362416a10460b 100644 --- a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png +++ b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64389699e1091b38381b5997ef22f918b8eaee67966b173147d05010d53d64ef -size 603510 +oid sha256:31e27157b383dffd58fab8bc78c679fb04218dd224eaa1067636070837131348 +size 253625 diff --git a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png index 0d9602d7b4264258f58fc38717e0ba9ea45b2e83..2b1571da7afcf78af042487ea9bb58934cbfc301 100644 --- a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png +++ b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e1e5e5bf887b3331ebd88327f92b3091fbdb7799a1661660b08c04258c6c6d0 -size 340428 +oid sha256:e17328b7528924a868fadb2da564d39630dfa0cd9ff00c2877cc19003892130c +size 328839 diff --git a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png index cf709eb53f4d522c4a501f22df08ddbccc1d7c08..31aed2a05e01e978c9cc5a7fd3b8539caed5e6f0 100644 --- a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png +++ b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:509e48a58f20f04379eca5ec1d089819c00ab4a14aa3bee6920364aa2d5620b0 -size 584053 +oid sha256:98861af07d201f14fb12eb81d56393a3d6b82a01cef5c413312ee56e9401109b +size 1424618 diff --git a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png index 10c9eb77a877db725c6c847cc3999fc161937848..9e5cf5edadf07f059887a4a34a5a29efb7e674e1 100644 --- a/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png +++ b/images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1fae4797f7873afbd6ec23543fc8534ac5a758a29a36d4ca9c20c7cd2c8c2654 -size 279467 +oid sha256:1d6374f6575cf7d33525ae28a432b9f8430c5b19bca9732b6d90aaa63d5277fe +size 405273 diff --git a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png index b8a7d822946ebcca1b0d7de6daa86d425e8ee7c8..81a72368c79326208187d2ddbf834573e6c6f0cc 100644 --- a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png +++ b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40a27f182595daa495fe025b35ebe5e314b2dceaae8670b54c4e2c5a101bb86e -size 1164930 +oid sha256:b595109540134b54c6972ea9866dd4ae5a20445ea5e0086cff7ada77d26c50d2 +size 1188963 diff --git 
a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png index 7f66485585cd5894edba9432573ca0e332c7a1ba..2ad8e4da02ada9345c09aa83ca8ad984a844d6bc 100644 --- a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png +++ b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15c9089024477606438ba18a38ddf67483b12e7dbc118dd5c4389333763151a3 -size 1083012 +oid sha256:9b731047924be20d27da4714ce5224bb1825f370fd80392f70f4833ce6de0359 +size 1214419 diff --git a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png index a89e59330d01e71056ea9e30edd0bb54047dfc1e..be046aebde71cbf234a95117c5edc7b144f12b08 100644 --- a/images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png +++ b/images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80fbcb200649e187d3b047f8393ce78953448fdf0fa5434652d0b45b2d7b5ab0 -size 1167922 +oid sha256:00a9a41f73d9a2bfc359e28ff2304a00c57c349866c78142a3cad7666f35ba5b +size 1298931 diff --git a/images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png b/images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png index 38e849f46b6ef7ffa15df52074cf0e7e5cf08f3b..19b0c36dae26b0928a901f008c675d016239ce32 100644 --- a/images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png +++ b/images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f5943779dc25ce69b547603bbfe2a8520608e3230219481cd508f7acdc6910f -size 1398046 +oid sha256:139ae1d9610c4a54a073796662677696e89149c2dd1d7d77508f9095ea70df6e +size 1279621 diff --git a/images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png b/images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png index 23bef7a40bb7f462b50c777e15a7cefc1adef9fd..bdb55c8ed96f550182d61c7d01b540fa56f15cf4 100644 --- a/images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png +++ b/images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11ce3dfdba780cbacf44bdff5d4f8022ae797572433f1dee7568de900165010d -size 1375143 +oid sha256:2a53492efed9e6542a603a0fa41e2590fcf50fad99fdc0e54d4ba0d0498a98ed +size 1710261 diff --git a/images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png b/images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png index 77d5e932ff97bb66463f083ad9169195f077f7e7..ac833f4465270909900de7c0b87ff69a12a7b847 100644 --- a/images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png +++ b/images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f09a241048159003fd8fada2a63c136710561d292506c5514da9dd9b5f01abe -size 1426208 +oid sha256:8ed04b3e512e306721956b6926da5e0bf4639bdc28c657cdb4032baf92df2a08 +size 1698816 diff --git 
a/images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png b/images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png index ed760f3ee4e0135deb59c7344093d4e7a1eb03de..b82efdd63c8458b02eccefc16e591da12edef926 100644 --- a/images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png +++ b/images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a00f89539fe439fcbf05cbda19990d8a70ad5b83bc634408a9c0abe70806f385 -size 3827896 +oid sha256:177f481736642c867aedb004be936d9e64d54ef50ecc5fd2aed52a5f8e0465f5 +size 687479 diff --git a/images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png b/images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png index 783d41bb7dcff62568138589b33b516a388e283a..c9660e289c5b57399636acdb2f42dcd53763b16f 100644 --- a/images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png +++ b/images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da3c236d0a5aeb00a39f61d45991cb50721564ab1c957f087c94e75b7887bc71 -size 1406237 +oid sha256:2a1fabd770400677f403d6dfccfa19ce8fc27a83a05c8f88c1522fb891dbb47b +size 1012613 diff --git a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png index 6e6eea79a60c5f6d4aa9c7295a5a7bd5d5397f80..1fbe377d8403803a4fda445bdd5b411e39d1f013 100644 --- a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png +++ b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26d0c653a4bddcd1e77373f08add482569d94c2b1d365060f904f4432751bd67 -size 1257694 +oid sha256:54382815ea9c9ac57387422d82b8786808aed4c57fe6c8b555321b944cb7204e +size 283562 diff --git a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png index 21fb39ddf22ed04f99003630b4caa5f71c68c8ec..7f244bcc2cfaabc1e86bce8b360ebcc6097b6e3e 100644 --- a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png +++ b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f5c71cf3a069d058df58fafb60ed239e577c3d5f401ceed47f208f2ea09ebb7 -size 1096983 +oid sha256:799ea620dc030cb8d1ed203e272f2cb3832fc1ec95ae81b597c4a08848658bd5 +size 1523312 diff --git a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png index 519b6c2d0ab12a9d42e6bcaeb4dbfde3b32d067c..539f872437cad1d0621fc5987e8e3256c56648bf 100644 --- a/images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png +++ b/images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f684aa35970496d27261aa11915ec4f415f52afbd87567af9c3075dcc88ed06 -size 1087306 +oid sha256:2f65de9c3280cf782b5acf44a414d76c743ea55cae51ce48445923f2e74a795a +size 1134846 diff --git 
a/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png b/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png index 84fc87020f23325cc440d1e75c5dcb3807adc62a..36a1f52b09f0a7feeff114bf761d5057ae74456e 100644 --- a/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png +++ b/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56e2cb05d84e5cd28c2eb2325d5e4649e8d6819561dcedbbe4631351e5fd35b8 -size 496695 +oid sha256:3bd63bb1151537306fcc60c9029b5dc1493faad460707a3269dc1bbcda751f77 +size 355608 diff --git a/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png b/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png index 29c33d718400aa9bfc18d590c12c5c1b120e00e1..54f65ecb20713cade6e5b14844ad83fda80784cd 100644 --- a/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png +++ b/images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c62da3ea3b6edb258a0a8d58a1f8c20a9149e83833e2d0566686d5901b6de7e -size 529007 +oid sha256:b2c8dc7f7ffdc7b40acd3f0c4a44122279dfd74c5302da276cceb4bd349aaeef +size 456599 diff --git a/images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png b/images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png index 9facd041f46b2d6573674bfee36f30eec8383d85..eb20baadb8826cc7250045f0cbc0edcc1e3c46e0 100644 --- a/images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png +++ b/images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f42ed0ad95b2cb6d39494b94ad4d84c7e1a1b351b58b0895ef564fd930300530 -size 1615799 +oid sha256:edcd76ca59a83dee342666c5d4746c7ef9e06886d932b40d9e529e85277ae10b +size 1622211 diff --git a/images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png b/images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png index a82c192a8762c20ab95a21faaa3a99e92fd57bf2..d2ac628eb0d59000c017cac0b41b7b82ca6d8027 100644 --- a/images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png +++ b/images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ce08552c96d20c10d2169279b798029d2e30402a6a267e7a63c4285e141107b -size 1832538 +oid sha256:1390cf8a4518154833827be37535971d34030346df0b66f794b8d15802bb77a1 +size 2006755 diff --git a/images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png b/images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png index d087bbd8d0bb8ea5ece3d3eac415069b6cfbf8e8..dde1d11aa6ad1caadc9d4934568eadd29035c852 100644 --- a/images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png +++ b/images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e0010fb3d63c863cd0a6a273a9f62007131980b41707dcb59e20e9b31818efd -size 2691456 +oid sha256:05caada142d9bc7befaef1d6853774b0bb5e97dd261120134a0f71ab9bd546f9 +size 2368618 diff --git 
a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png index a8afc2692280b1b66fa933f1c532562f34b16f49..ed67b6a72e32b9176d4cc641af5aca962959e978 100644 --- a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png +++ b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:900fc14114c76eedb339679e775f3241eca08e9915edd2b02ae8d079e83663eb -size 2743968 +oid sha256:79f4d775a4baa29d898873d832db4e2e71586ae6fe1a1573e991647c21a41fa3 +size 1374786 diff --git a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png index 54cec2c69615dfd404ce252da4c96c0190f5bc3b..ad691cab518dca463cd53f163d29807c953b785f 100644 --- a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png +++ b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:972c1081f3aa88379212847ae7ab886a96035b03f9efa2c3cf7b6309b934f8e6 -size 3093390 +oid sha256:b8abe0f9bad36397c28591a8104603773e688aeb37ad657f15c5bd5e474f0955 +size 1227596 diff --git a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png index e4276289ad698ae283bd9f463ceee7c6b591cf56..28b3d3de9fa413a44b3c22167f9b916784fab1da 100644 --- a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png +++ b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe2f2a6f4eab74186ccfb924ed9c44dd20ec021d64c5cae57deb430ddeed31b1 -size 1891894 +oid sha256:32b65afcd02bdae6b105c3106afce407a610c05ee9661921e6bf0cc18f3f0227 +size 1973585 diff --git a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png index adbb87ea8a34ada5db6a5e40b3c93ca0bdf9eaf3..7213c57a789eab36484d9f0d2cec3cacc765e71d 100644 --- a/images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png +++ b/images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:290c7113c2825eb6579793a9cfc01c79937142df6472202386760abe2b92e58a -size 2655982 +oid sha256:920a6e8e6c0acc334522ffc704061c7b6bc9bdff37f9641cb8acb5d07f6ce978 +size 1164947 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png index da35885bb7a08933d073c239e1b11243411f2787..abb26ba3136a9afc71face43bf894850473f838e 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0222106080af22e65eb351776341140e2d8f1fcf49146cd55469f5d80686f6d -size 951267 +oid sha256:52094cd8903057a700f22ad04d306ceef3aea48e7de7e88c07add6ab307c3010 +size 926254 diff --git 
a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png index 9ca85db5024d42c9d2dd78bde71134d81876baa3..e8decb99a586608864740d4ee5e694eb2c5ad88b 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:104566174e8c61e91b65c44dd9ccda7ae8a49761ef36da37171c8336500751e1 -size 919329 +oid sha256:f1415a9aeb4378392c83da75341d62589dc322ff41e9e2ce26161e244ce54573 +size 963286 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png index 0ce5cfed4f89a333a873c64157d44636acfc13aa..7f20c29bcc21a0f5f405d1ed0d4b0c2386e972f5 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10997c1a70336bd8eaedc418a8d41a672c748dc03320ab84af8fadd5b068cb28 -size 983868 +oid sha256:c426d9bdedfc3458c92a37a3b6efcd89cc8acf8a81fdc5fc44f357c4a352baa6 +size 851383 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png index d1ac7e2ef08685ec1f9211d85a0a2cb5f206a0a4..595b4f39c1c6f3a14d28e1d7a82073ade4f4ca6b 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14103ddc670cec4fc5f6381625d9ae82f1c354de70d3d5e09b09710d4100ddbb -size 951874 +oid sha256:1d932977d471d1c81394848d3d7068c338052d7685e3377ddcc2c031a00603da +size 992208 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png index 32ff0de8f50b2b4f315feddf5f11fe46c3e80bf6..f3e9ae0f989eb2487493b61b44d90c5a513290ba 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1683fa8b44a0a7b0e804dcf75fffd2e4806ae8f0c3461c491ccf8de6715cfb3 -size 3017238 +oid sha256:dcf4acbe8d61d9a9dbe601159461b73fa1870e0a104bafebef90e0dc5bfb2c44 +size 1697461 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png index befe25e1e07d8db88ed3e5347fb1dea7f7bd8607..4955ccb1f4615843de4b43dcd70148763b0d345d 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9bd6404527330de29bcae588ddd708abefbfd66452f37a1c67aa9586ec325c5 -size 949444 +oid sha256:c40d8ac922a7fbf78e6a61dc19d7386e66eca9f3fd7e3aae4ad561cfa618b3a5 +size 1202013 diff --git 
a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png index 36e04f17474d3b8f8ea7b1a431ef6035df11aed3..518f5ee5927fed8b6178a7c91ce86f53d64f3f76 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b227ede5feb9f92466e87544a920b1e26513f0ed4fb9c93845960538123d873a -size 951411 +oid sha256:415d3bf9209848204776811ec3e0b3898ac8c84e7e7b5178ca1f200ff7f0e987 +size 991718 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png index 269bd66dbb5861145d649c296777b9e76541bff7..bcdfe351095476e67784d3b4078f9ef97bd86c6d 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3e996043655c90f31c9cde7dff8fa2fc2755e9bf580db9d24396ce36f48a02e -size 606904 +oid sha256:19d3685d5380201bd2e087a39ec21c59c9368baff9d9facd5ca72f5677aa3a6c +size 1309672 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png index dcbb0d9ada7eec245e2359eaefb6608a0d5dc3cd..614feb023bf2f3d0c681434bdedf4eeb43420386 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa90512927a474f970f01b618b48806ce37fef7d4636c7e235db55e448b5226b -size 964162 +oid sha256:d0dfb9117ca97d734e7d89f2817376d1b0430f08c66fa3d297242c6a44002306 +size 898674 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png index e464cc8a6ccb69c90be9c9342142d8cd724ce867..7fed5b49fe1f0add08dab069a2ced05e666a251f 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89e69b282a9205ee2726616d92e00db40ff8ee07b0f69c18fe7c6846d8cc5936 -size 990120 +oid sha256:7df285a1f5e1388bb430f96c12cee5b26f5ff3fb0d7955f2a3eef318b1eccfde +size 1136092 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png index 6c19ad30cca1d73ec4d0e5bdaaef9bea035c2e33..9e02cfe0f9d73ca6984a77a29fadf15fa047b03c 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ee3f4a5b44c4fa74b4858dbe029c052510937ed5fbe225671d9fe1d86715b9e -size 1509970 +oid sha256:89b2b21a62df3c0f906e663773ad0e655c09e99b33aeaf6cca773030181dac6b +size 1157876 diff --git 
a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png index 9b1be25173d673976cca066baa4cc61f0540f21c..2468eda9cd8fd7f75c7582cbb94fc12fc7979e55 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83aea88635aaae478e997fbd488e025a332af620c5cd02838380876b61e54087 -size 950474 +oid sha256:de10a5fa31513e62fdb3f657a7cd7f6186906e7b804b64c30e232b43b0b5e056 +size 1254307 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png index c8db09df15ea6ae498f88db0b1785805863c8139..c7061b5160f12e87675dd21b4daa835f95fc8dfc 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97697f88626b6ff0a2d072b8f3bbc35d10fb1d01f5c117cc32b4fb6e552f8dfb -size 990639 +oid sha256:9bc65b072105f8d83227babdc04372edd8b5c5bba71222c8bb577e727ee2c7f7 +size 1042374 diff --git a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png index 5366f20edb2ed5b9a41df06bbc1058be8d957eaa..af8adee3367dc142a0402cd400e41a113081b505 100644 --- a/images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png +++ b/images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2eb67b6682361c536139832b4c110ea0b2c6b5774134b274f427c8434e0b481f -size 949299 +oid sha256:b3fda9f0d6076547bb0e4ff7ee2fa55af900e9e3a74ea14eb7911e152b41d4ba +size 1115729 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png index 3f4a2c2a3e4c92081758c2f650e839a087b06ec3..e95afceef4d782d08a6856b7b5883a524b1ac3b7 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0822f8af12310a70daa7a4a0d29d8f000ffbe0fc0cbfd9b933253128c4d383fa -size 607419 +oid sha256:1704fe466f7098eb61ca76be6ea43c60076ee1f122f76ea59ee9a36bf446844b +size 1149716 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png index e58a96932df0b4f80b11a7e90bfe0a0fd800380d..b092e62ca66acf234d88b18cdb311fdd50384edf 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af2ee83221a95ef2c64f37995808c844fc43e62ddccdfcb9e44e2de91eab2648 -size 1299913 +oid sha256:9cda53b39e65b87878ad6f5f27df3c36c959a3182e81cb5d6c73f2226e716f69 +size 1968165 diff --git 
a/images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png index de1ae2627f0b138eda414960adeb9fd4d990b74b..8eddaa71d2e865b8d0697112929e6784e33d4f4e 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db196e8e92dc78abaa6bb71d7a8f1b2660f57882410ad202386d43daa86a3bcc -size 603200 +oid sha256:a16b57163554a4c46e9ebd45119d49e0dbe83aff78228aab706c59804c8e123a +size 1173138 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png index b5f542761981e14904a81660278f954bd1f7e482..afe03f8594d7ee416a0525e3dcf17971d3a43078 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7f21676d71803ff31e9c08444edfef5cabad8a3553782fcbfb84fdb04f1debf -size 592573 +oid sha256:338d95ea173e9080ecdbc5bd983543d0527e4c06450891896cd897cced6d44df +size 298799 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png index a2d033960f0a6c2f4c11b429e58120f2f406b0d7..a31a10ef32e5dbdaddb0976162b3d4b8c83b517c 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbcea0751e3c94f043ea1090fad9d4584987a27bb078b35d003490bce060bdbb -size 553070 +oid sha256:61ff175617fcd84964e54370f98ea0d838feb456d9023e552955a20956158362 +size 1091435 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png index b7abd7b07b008410cfa6cec3bd67a8492b5e0e43..539b9832ee02f517d1160c8c2901ab1619e2b076 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f16e3c11b6dfa1a4791fcb94ad5aad02cc90d618d54dff87f0cf42917ff1a8cf -size 609755 +oid sha256:4fe4e51dc02ea3617fc8533ab8610b56ee14558a1cd1900f425e15dac66a624d +size 609304 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png index d284982b50c3c6c2d2b82ab8e0f5ec3b2db48ffd..ff5cd44d4de064e466090ac95ff66436d30ad980 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2efecc53babc908e9b544383a352285c487eb10e5ba0cd6b3343f88f0ca39512 -size 1197610 +oid sha256:57c9b15ee44805d59e8961bf582bccc3ac7c44808821299756af664f19e4094d +size 1464569 diff --git 
a/images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png index d0473952373175e7365ed7c8a146a10280ff6bb8..689936d2e6a0be79e54ae87b1b2de777d122b220 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf90edc980855937b4651a59423859356997ca37fa20adfdbd27fb2e9f071509 -size 1197837 +oid sha256:6b9b93249f47d17d64797e1849afae72ad0d6c9d50754d4390388753bbc1ab84 +size 1464801 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png index 32c8b238757aced8e33f3f903cafcbff8a16588b..3b66e5d3d7744c942096a8b884773bf329b495ad 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38481b45de702dbd05d795eb492a4ebb744ee5078023dc9f5b9f8fa18972e320 -size 331974 +oid sha256:169305623f71490321d685acbe54dc26d1e937ffa387dacd2c2996db8d202457 +size 182160 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png index b602f10bf51b2e1a27b5d308a361f61a30f0c71a..30e2c0c5779f2e2e893a950973eddf0557419cad 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50d24c96a505480b83600c858c170a0ad0f558a087d9fd82c968cf06e7f26d1 -size 594308 +oid sha256:132a367ea01841a42241e8523b322b103167e7ee767012dddcbf1aa687584347 +size 1097429 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png index 9142eaf2c12658ef7172af1b309085c9092ee8ea..49fe1de066e56a19bb070ab87e5e9421f550dae8 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dde5dd92d498c2efd8e16097aae331c38fd8b5dc0fd99ae982372d0e27ae4410 -size 1208954 +oid sha256:23b3b2bc01d18b4c2e6dd42c28d174d9194b7737e84f459c273a5199fa4a81b4 +size 928154 diff --git a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png index ce4f727d6208ee64669aa21261e8455c92f9018d..a8a2dcee61f542fee0d02d587ab2db74c77d773f 100644 --- a/images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png +++ b/images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1899ff9deaaed419a3d7b49dc0ee17d6fa5048a5a6edf679b03e8f3bee384c5 -size 577161 +oid sha256:0eeda7dad72d90d9b33703e2450bcbda39586ee2bd71c27576fd12b476626cf8 +size 365066 diff --git 
a/images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png b/images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png index 4db7c9c20ccf02358798ccc58761d76fd83cb079..def32eef15d60a64757497aafaf507bd573db65e 100644 --- a/images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png +++ b/images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb76f13f1dafee3bc654e251b0f55a1717738f32c8bb3bca0791d34b026fe3b6 -size 1094276 +oid sha256:80b1aeede4f6954460db8a41f945d595108c94cc1430ba59e26a3c213f2ec0a6 +size 1522744 diff --git a/images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png b/images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png index c62d15344fdde62d3bc6be65ee3934bfa0adf58e..92b27d2a5c6feadc83688c532807b30ea1f81a14 100644 --- a/images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png +++ b/images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd0de4ed3a2a5e4b88313171fa13f2fb428581bd88dd78d0783b9cd3ac8dff5f -size 1252436 +oid sha256:c3733b63cbaa3f0f49942f9710cf4ac1f71052d1ccfac4ab1b9583a0675ecaf1 +size 1946283 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png index bf38f30b0ec3dbabcf364f0b9dfec7591d708b4a..6496dffb30d19b1547574066d2b6587b0ca903d0 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd0db1478800d7bb35f7ac7e7d608fae2c3d560cc368d6d243174f6fabc94e82 -size 1402533 +oid sha256:5219c726ad4fab166a1fd638dccdf238c4c00201fac00c3f90f9846b04da5752 +size 1386445 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png index 3c6543d803060dcba736d7e1849fb8648f018b5f..b4122ee93e2225a1fe0b7b4b030cdbacba39e47a 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fefe29c5d45591c50c984249b81f5001c0b63345d0f3cff98dcc717ed1a56f4d -size 1220125 +oid sha256:c4b0805e7e35d56796c502c7afb4d19246ec14df264e5e231d276574912c0b59 +size 939341 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png index 99deadba169660dfa7f88649a2d9d1afdeadaa29..f1576a040c83cb645cee2deaa73037d7d72698ed 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b884c8596e0c502409fabe1907b6c92206aee2661069af4d23353b8a04e58561 -size 1513641 +oid sha256:2e6249f32f143c8dabace84585430afc442a504db9a7b69170f1278478e1f9e0 +size 1857557 diff --git 
a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png index 3f418bd3e9e9fef4c80a6d63c4501bcd6d3cb235..b29e335f1a05f26aac28c2ec8c2095b9d4140c24 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31ef004e7bd8b7e457a2565dc1663e0c0f162221dd626192a2c1d26105df521f -size 1403909 +oid sha256:3a5a6275f8b19ea7b21d5f752eaa6442d83fe1943b80b1749235776788c78218 +size 1957915 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png index 069d1fd9b8b625ff74e13cac8e5c62d39f809bb4..bf42f3f1d8b3b05102c6159e9f6f18e899d58017 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fc4ea514bbd2696db58cf940c6610c6a79aced68968f25a26434a35ca968db8 -size 1429463 +oid sha256:cdb67ec74db46b2e698f526553be009d3b9da55e448cc071529ccec73c0c4e95 +size 2018960 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png index 735277d2255be3239d3c77f5e6c9daa9595dcb8a..306547084278cf8f2f1d223ad9e38f959c1d5eaa 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f91cf02b51779404cfc438d54de401689eb0b5396944d37dedc47bcf174ea6d -size 1402614 +oid sha256:c3f29c35e4ff8b22c433c4c1b8afc21e352922a2ce26d10c5c6f8c4bf2b36858 +size 1433168 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png index f491721039d0656d65749355169488d4165249f3..e82101b40f005e6e0b311fe22f0be7abdc32c28a 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14edc4319c7215fc41543ee348efe2082b7dcfe7ae994b367cbf321f3d554813 -size 1644992 +oid sha256:5e39ca27b8e15532034c1738c302636976a40ca96edd11fc28b717eefe5701f8 +size 1379607 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png index 32f1a9f013c7db0d0531a22e4fa0e9e7ef1eb08e..feb3438ad76e1dd25c0b4d6d61e68eec8770ad4d 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cbcf7eef4f7152869dfb0cf77b573b0ce8efd17221a86978aabfbe4fb8c0c65 -size 1480433 +oid sha256:1d868e3276e83cf53ca3815b6b46f4ed673de431bcbdca92ae809cc96b5ac65f +size 2114752 diff --git 
a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png index 12f485b79cbd6b521a081a7656a1bfc1068863ee..0ff27b5985bc096e28c8b585fb1bc90f8b54f6df 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b7ceca66ceb382d1dac76715da19efd62268a7b41787ecf3c5b5bd524c8a6e1 -size 1404254 +oid sha256:24f4a812fd51b5d00ea750985e283dcb7c15a792038958356b76d07874e3b502 +size 1383192 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png index 5e8627982b371c37bbc89abfed8934c6b98783ae..7e8088132e0054c03e701544d9778d422af5c625 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afbca56bd2be2d033bc66735fcff8dd04e155892467c8ca82f5be649d512c3fa -size 1510484 +oid sha256:c6ab34c11869d6998f014de428c611399a10e4f11e08440e2c4da1d25ca9363b +size 1886901 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png index f31f729f10c6fbd7de2c8a5b2c3b47a3c5b64f60..5e77014cae0e6ada602d850c2a3d21bb57bf23c4 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90ff743ac5efc946497a0a1c574b4628f758a8e6ec4b3b4359db9746e2906fae -size 1302158 +oid sha256:1dc78a7033fefb9781c857f2f753c13cc3e6d27c2f39297074c6ffb12faad38a +size 1288442 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png index 735277d2255be3239d3c77f5e6c9daa9595dcb8a..d2ef63bc9f98eefb6fdd1b07bf38689bcd707c9f 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f91cf02b51779404cfc438d54de401689eb0b5396944d37dedc47bcf174ea6d -size 1402614 +oid sha256:421950400bc3dba1a1e5dd663c2cb2f7e7150b8f6e22cba647987786592f1161 +size 1563998 diff --git a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png index 409b9003feaaebc2b4ad097a2efec76ef9a98d81..a00e397eb1f510435bb005f742ac7c9a4bbe292e 100644 --- a/images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png +++ b/images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:638d005631522f903e65bbde586c6ad19e82c8619ea94c03bbf29db6492e0f88 -size 1823606 +oid sha256:3f210d1f59fe9ecbe538b1def2569b59669844d11af0247a87e89005f16c4c30 +size 2238989 diff --git 
a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png index 92889f75cbf4b18a481a131a60f029ea7f26dfd1..d6641f47333cc02072752e7e9e93ff69e6ec138e 100644 --- a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png +++ b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f46c0cf9408e0668d35357f60fb68c8aec2bc48fba9dbee33eaa9dd1691b854 -size 1164424 +oid sha256:f4da565c91efffc38909bcf5ff03dd2722d9131660ba4850678a068da9362547 +size 1015570 diff --git a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png index 3a8c95fbca2e3a1ad9a17b90a712a68e88142bd7..72b5d9bc879ba3f3a6edb7b192c0028c4f4d11a6 100644 --- a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png +++ b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87174d74ef5d6cfbe9c89f2c53c5050d6bc74fd0211ceae2c79cf4a6ddf705eb -size 1674361 +oid sha256:150f3aa71fc323a1a1fdeb78d8069b4cd8a09ef70bc1a2ff4b6f08c215df045c +size 1089396 diff --git a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png index e579c838909be7d95bb2d0d226d6875494509ad6..8e97b0a75342f0c04dc5b6d9439f2911cc49a8e6 100644 --- a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png +++ b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41bbf4899011e035e39c3a445ba3ee964f5366e9bbaff9b7fe3ca83e52642212 -size 1002153 +oid sha256:ee11c1a99fce4b7cd29edfc9cd6353cb9853fc4e087d69a6033486ef588a13f4 +size 1137402 diff --git a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png index 1644f5750a4d9b9ef935d30e53f6b24d0f456594..b37ebfa59875af8c8c512ca8aba85662b7156e70 100644 --- a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png +++ b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7e4e5be30c33736976d7d56ca40dbd68196989eb0d2b18e20f86a26dc6dfb12 -size 1456752 +oid sha256:bfa8b1cd34f6c961aca6415a0fe7c3c53496e6e8148b52210e35a988ce491054 +size 1262787 diff --git a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png index 6c3cf3a94a261aa8c288beadc591310a3be836d3..2ccf58fa08f21743f15f03b761983f1336e27175 100644 --- a/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png +++ b/images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d11a763abc1636581c7ffc36d25212b768a3c6c39bbb365a4d0f281667f33ef -size 882763 +oid sha256:b0a01e49c6a4b064eb9056e3a74f13022063b5d65f8e26ecb7f01611f959d9f5 +size 1181109 diff --git 
a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png index 2e1b7257b2eaffbf1aacfcba8f2c710e5e3f276b..85c1adc8fb41d6b17bd5caec2603830bedb3584b 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80592ef37322ff8b1d33ad41aca6887283058ce131a60ebfb164610d983a62d6 -size 889132 +oid sha256:c1c44f61f94ddfb7364f1193d97d4a2f9cd508f8903a33522d36e38ae5726586 +size 1655679 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png index 1091e5bb737ae7d8737e297a2c218fe0a8332c2b..fe5744e93a1bf8daa84a669f89e66195e04577e9 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0450d68f237d795ca36268eb61aee078f6c1a0a1cfd929178a4c268ef55ee3a -size 1320202 +oid sha256:bf2f9c67549ed021e0419be71ef23218c05fcab527d23534a7746df46496b632 +size 1491734 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png index 9ba33a670ff1d505f5728c1c14dbcf2c3fa1dfdb..b9b4d1e8bd77b0cbc6373ea5bf30613adbdf1e6f 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:422117088201b03b2ae90dd4ccf59b1cacd94e30f95ca410f669d754a12160ca -size 1130444 +oid sha256:18c999d705a3d46ae01377fdbd5679d19af336a12de8d64300d7db69c9f9b347 +size 1401821 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png index d6dd4ebf4a16731a8c331ebee50c0a455faab560..0f8fece269686a64647808f5a731d05dfcc3905a 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8873b38bade8897a2beee4dc1a5223e81c3865c15793eaf9790fe4ed475cceba -size 1433865 +oid sha256:041f2a3e8846c5123f61a87e52f11db43c232a767345562c2aad2891ef6f3b58 +size 1288802 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png index dd22b1b97d8e97b9b6f1834a12a144307f75c2d0..524d8587bb5c21cf9315acff78cf833c41d76f06 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfc6fb62af2c76ccafb2eb3101a53086f61b8faf4a64d4c844ea9cfc1fcf27fa -size 1826084 +oid sha256:fd40183a00fdc8fc9cc9e3f226a334766d0a1d8a46991c51924aaf4833283cc9 +size 1920323 diff --git 
a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png index d7f32032eb55a308f97699f4a784f1a79e1a689a..7a21cc33b4983cae2ad4e0db90f06985276f6b86 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ff80256b9096883b87c95ddbeb73bef3bce6f9ef8a8b39c900181a85ea3bf20 -size 1325591 +oid sha256:2fc3e346b7094001cbe67edc873dd1c354f62264b929127443e194f5444e2d90 +size 3166822 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png index 300fcd4b7d248dd27eb1d387a320dab5bcbd2593..357afeaa1ad2bbd810ff6de0478370332689471b 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89a1d2987ccebd88c5d877f4511e6a4738d48a226d5056d560f3e2c8088a3e73 -size 1500004 +oid sha256:b8f36ec8ac2fea8a5d0a0a30b1c8bef2db8abf35dba6bd2c433fd2b788501591 +size 3432286 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png index 671ea7564685e3f2a6a4601e9af54ea4e327de7c..628fc36d45183972777d73965e8f48bee3124102 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17999a0c3967b2151203758b0f9df23efa21c24259b2745296408bf60a412f1e -size 1799232 +oid sha256:fa822749202492fb98bce8582e420193a06bebf6119f05c604c36aca39a1e49f +size 2248514 diff --git a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png index 9558483057ec59e7479f2e8c0800d6cadde82240..73bb03f32708844632ae31d68101b48e9754f2df 100644 --- a/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png +++ b/images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:843d67c4bc6771fbe6c472af20c8ce332bc0c9ed04921468b98181de6b57cbf1 -size 1323113 +oid sha256:25522d73a6cd5771c72d58a5c363fcffc3014c80eb122bc5d87ffb09d198850b +size 1010775 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png index 00280721834d1096bc752ff9cdb4f40636718bb3..b3ac432a5562f9927eed0ba1a633dca640521728 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28ff2af22edb0bf20627fc6bb59f6f9d9baf03c9388ba570981e5267a56f8a7b -size 821419 +oid sha256:c525cefd1f8143b7ab06a4332159fb8449578b6829643dfcbd26f615becff871 +size 1362456 diff --git 
a/images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png index 7b0c5759a08cfb0a3faf77d348b6f3433647ac93..ec879503c065261d9a50c1abbcea5e96e7e35f37 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1180eb738eecfa2f01908c3d631bc94c1c45bed54bbafb7618b5b701baab93c2 -size 799712 +oid sha256:b17383a0c220ab84c94b1a2a9ad6dcd9788d0e7e5395b7d3e08a6e9df01f6ea2 +size 1555746 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png index fa4aeb7d1cfa9aeeaf2535970acb8b2f5afdf300..a11f9e276c84367fd7ab543455f5a81d324dbda5 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90bf1abb2bb67995bdcf532ae62a04ef84c9b07cfed6cff4a7a92d2e25e0e3a4 -size 703361 +oid sha256:c4d308e03d9488f8e95a6e7e4265506dda709663da56e5bb00bc51a1c595bf16 +size 1035801 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png index d31b2ff3abafabf25a79d2d3479f277cec4559ce..eeceffd351a2f2d2c77a452c4a4b8bbe9bfe8056 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98d887625976879f3918ab052d9556880430f9476d2fd3bf33c39e0672227cf1 -size 894064 +oid sha256:fe682871545cf47891263330718a4e3dda3ba4199817cb0943496b567ea349f3 +size 427249 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png index dd481ab4aa0d2d2670d2b7cd03fa4951ac65a957..104d53c23d5f99317bd2cd2b434ebd11227aee5f 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:722db839e769d877a4bb406780fb859bf9ed4c356ebf4fb5a1ba2e9a99852e90 -size 795892 +oid sha256:7dae90009f15a14e95a396eae8517e8a82cb00bfc21d2941ad87895c67c2dafd +size 586700 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png index 9b150b4ceb5cf3cdaae2fa838a4cfad45a756e4e..eff25e8d20bfd90406aebd7282bad43c09691f4d 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca88626be997cac4c0dd8e8f260dd019170fd9669157fbdb5d25bf461ec81e82 -size 984001 +oid sha256:50386e2b76ef31b402bc77f941b0e27241242c999905f3d7f9316c90243f6e88 +size 1051123 diff --git 
a/images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png index 7e2d9629345739050632b2aa61bb5b2fab906f5e..6fefa3819b9422b059a5f1cca8ffa90a4291ab12 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ce5ed8945d43bc7efe000a2b1242e2c2cdd5ec2bce2f9582fe4a0fb29d9ce1d -size 700738 +oid sha256:6683c25d285fce782727ed954f29147d1ee10f88a9781b9bfadddd4092be51ab +size 1307670 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png index 4b4447d5972bec388485a41c842215104f736359..b5f79e531183fabf048b6279010ab2b658a1a3da 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdd646adbcc8d28e595e2d24d68720f9c55b33ef0ca1fa948a22bc5ceda28d21 -size 799367 +oid sha256:a7c7835c1f6875fcf9e4386372eb6dc69ccc5e40db54923ed7d2bf56cd8eefca +size 1488346 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png index 6c842c1ee6672d484a80ed1119037b3f05050ad0..e816b8932d628628ba4a6871aa85d226f6f8fd9d 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01d05008e546bfbd84820a637eeaa6e51f8780de91de99c7836a0761a6c977bd -size 770572 +oid sha256:76e2362531d3b3fde95149a51bb33b32741d5ee2da8f6ff983d029e8a6fceecd +size 594876 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png index 8edea290a7209fa852c32b3bc98af2fd9c64972f..00281b759c0f56bc7239e6ec6248768a67b3e62a 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19e244e533a949f0756b108902a54e84d4802b231da75676a149511c1f483e5f -size 782330 +oid sha256:474c3c25cb9fe2420030f5bc42462fac7f551ce41556715b5f1cb91eecd201a6 +size 1134060 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png index 5798acad06fb454a547437526dcf9296fb21c4ae..45fc53e4e7df401fd6d3e0c379be0a159be56965 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c558f10f7086b59d6239f9fed3ed057cd258a29e3023d84c503e970db24f18a7 -size 815540 +oid sha256:82ecfd44896bf1aa66a44cb7db4af515f3b48a942abb24775b8b44adf43be0ab +size 1755481 diff --git 
a/images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png index d4fb6739b1b371b09fc370568e6da96918f26d87..7b99abda59d241a19f13c1e338a09e449f98894f 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39ce6e63703ce68bacc8ffa33452c46e3aadba48d12098df6720209f81d270a7 -size 947550 +oid sha256:9b89da6420774a189df0235bab845ad3caa16277d2e89f85632d4dccf716e618 +size 1431091 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png index cb86f0f3abed12adf873a0e427253cd6904fa929..d5fc9b951a7868ba090e87a6c39060ca671f310b 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65dd4356d031214806a6bf43930d2fd84d850c2ef0f4b8f5bed9fbd2ddd9814c -size 716112 +oid sha256:8bd0444bc6082ede30587db5bb99c7536a3e707a393a6cc16ad1a78501dc1d1f +size 808690 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png index 02f84f826b6279f6c7e16f2d98f752a6dbfba2ec..d948bd12f8c36e55a0ffdf121406cbb63bf27cc8 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cacd33604c769c6daee1715d872724880929282e7f53b1e87d1684095bd6070d -size 717960 +oid sha256:4fb471ca9fba06f8967997e8515ec3aba86c23a590c79988d17948d38bcc2b45 +size 945651 diff --git a/images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png b/images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png index 7cbadfa928534af284b5c3205c29b779108defd9..d979dd4eb12e5cfd4448dd06d04469bff29b4e02 100644 --- a/images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png +++ b/images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:515322fad0ecf4b35b9981f893ec44206860a1b6e305927055d61a88782d529d -size 889353 +oid sha256:1b54a16e4aefaea2cc677a3266887ea6d4744242859fd429a83e16907da6afab +size 1343828 diff --git a/images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png b/images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png index c31d6355072f38cef92663686f08191440f20849..768896dff489da41826f64d0ba6ecc7b789c81e8 100644 --- a/images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png +++ b/images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:267bb2bed4fbbad96b5ca9f1aaa6d92c10dd900cd26c73488a8b201abb7436b6 -size 237757 +oid sha256:633694731c094d3f36a83db0296c5e03e3a42c9f649af5f98c63908da7b7a436 +size 228132 diff --git 
a/images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png b/images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png index 646b89162c4d625d8447fa0cd4fa54b5e6491c74..05f8958ff3560afe3aeb8dd337495c2a3a170720 100644 --- a/images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png +++ b/images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5153fa0d2a115b75b1aaa883ab3ff72adee0713d3576486421b30c23c1e13dbd -size 1974355 +oid sha256:44d7cc1a4bdd93f990e75d0a8b886ba31a8d49125240691f90a9f41ef44545f0 +size 1792634 diff --git a/images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png b/images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png index 93d5bb121a6228431f3eb9c8309e78c9789bd8ae..bb5cd2aae31aebb1286ba2259faca3f5665ce9e8 100644 --- a/images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png +++ b/images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f50bd0877e0d2098313aacc9d8ac1e69013308a2c4d68134b31ae9da4a82de25 -size 1521669 +oid sha256:bc99a034ade8ebac10e359aef11d97978dc613fb34ac86e34ec4a777c2c8a533 +size 1276568 diff --git a/images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png b/images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png index fbabcc37efb78512bcfc6a0bfd70286dc8927915..e6be662af3bd8614b03a0343c5bbf327e935db08 100644 --- a/images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png +++ b/images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe80e9c6ccc9358cd778ddec773dd1c1945271c50cce4cd3774a44fa3fb91545 -size 1779384 +oid sha256:ccb775f05fabb880d570d4a83d0b283f53cb30748b679952a04ddd4c8fd42a59 +size 2549535 diff --git a/images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png b/images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png index 4a1a85bdaece1a352200fb64691590fa39f6cf0f..41b1b29d85bad1336622b94670c6a95b0735a6e6 100644 --- a/images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png +++ b/images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12df7f86b2b496eff0cf68a3b2ffaeff4ed186589ef27d5f150551785f8aae72 -size 1986428 +oid sha256:6c883238de1231c8081017aa11cbb0faad85fafa8cf3fc4965225b8a15708e77 +size 1506984 diff --git a/images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png b/images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png index 8d5e90743a343cfc6430759d8a92b2a828fd203f..9b44417759a13784fa90c11523e5f3097293cc6d 100644 --- a/images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png +++ b/images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1815450936371e07bd729746ce201e0c260adf233e5187b301f3e119c71a5767 -size 1777384 +oid sha256:5ac68ec6d9328ebfafdbd9fe687edb1653a9bbb84783900bb04400a3e5869192 +size 1291619 diff --git 
a/images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png b/images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png index 1d4e738c3c56edf22ab39ccebfcbad77b5737b17..98d41f6b7b320a19ccb407c1cf8bf3b92a46680b 100644 --- a/images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png +++ b/images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53ad44b0078477b311e30fd7d18a6cce9750e02b05dbfa5294b9fda4ab938705 -size 1019272 +oid sha256:5d7b16f5168e8a2bb085e1cc1610ef30822bf74aca9152fe600988071c6f6900 +size 1194347 diff --git a/images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png b/images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png index 443d1ce59c6f4e52b82582579c025a20b3c2a280..06365785581b691de66dab055568f3a2fe3a3d13 100644 --- a/images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png +++ b/images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20c5adb65079f1954f05d3dee0d5aec8568cd957e941c491f9601d0422a85b11 -size 803881 +oid sha256:08d1029fc5d1776362ec22f6567c14912fad418e1ff4bbdeef9b712806426712 +size 489094 diff --git a/images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png b/images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png index 870dfa15e3f4c5ddeb9606d5f0276c514192adcc..05d9d9a3501fc8f8f836c28e1e1a3b8af33b3baa 100644 --- a/images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png +++ b/images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8353a4f26a745a822b2c00ccae9b3f5769c1354f5bae7aac6cb21d785daf9a35 -size 1265406 +oid sha256:dabd9fe5867e6583f0b5430bd82c75956d45d808202bb2bd48d2dbeb05cf8733 +size 1414761 diff --git a/images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png b/images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png index 50d23df78c85f61f7c42d608a3c7f9aea2d30150..d30a03cc5fd7452be43f5bab53a14da1a7a4016e 100644 --- a/images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png +++ b/images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83c6798858ee1aeaa26f700ce01e6412a578b829b1375fe7264cc2bb6285bec6 -size 872181 +oid sha256:3ad9b17aa1d790e8fd91ae6fadca96e3f514be151dd8a1208605c090459615ea +size 1223062 diff --git a/images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png b/images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png index 0584ffc990ab83b4a9105e5b6d7f3e06fe01b65b..bfaa654309cc44b7c70aa5095329ca5f8fc74c48 100644 --- a/images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png +++ b/images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96610e3827d4407e6795ad47b1acf0a10d91dbfe65229eb70ac081e79658fb44 -size 2348030 +oid sha256:5304fac0e1fbfbee9527e2e17afc99546bb6c9c25a4780b78c3a39417f8f012a +size 2080828 diff --git 
a/images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png b/images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png index 9cd9593f7f3225e359edda1511cf9c3c4edd253a..408c9ea4f0e208a5f5e713a2f3beaa9339932ee8 100644 --- a/images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png +++ b/images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12ecdc5ee116fb79bb301c6ce1864fa116bf6c2e7229895e34888ac625b0e017 -size 2441818 +oid sha256:d71edf3deaa7a3ca78ff4e610a63273023e587d84e2076c31f2e43ae968902ef +size 1780160 diff --git a/images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png b/images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png index 436e6032d5fe3594e5065a3559b9bd2383b80287..290b0767269fdd3715951c399918f9174c34d1e3 100644 --- a/images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png +++ b/images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a50a9e6dc96728e02ba05934b777d242dce7441ddc9d23321bc1dd48fe9b76a6 -size 819780 +oid sha256:65671b4bd2744e498d43eab56781f742063e37ad80031b6e50437bbf9c14895d +size 928584 diff --git a/images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png b/images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png index fe36fb738b4411ddf38800c46135556bd265ce5a..ec6e63b69eaa89e09c46a1d008a69b6fee0bb2ed 100644 --- a/images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png +++ b/images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:198ab0619e57cd74a1863204689f35bcf15ba9a825fbe76f6e6d26bedede1dbd -size 2012708 +oid sha256:df0e9193751d3c38ace4936d662c28b6ab8a1e9dd7ea4d7a2813a2ba1994e51d +size 2105518 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png b/images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png index 1fd6396179d3cd718a04076b01cfd6fccf83b70f..89f94b5423fb449bac459f8fd2ef4c022c3730dc 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25c7cb71ee17757061e2b879e17c8c6da2c46574451d7ee2fc5165172a4352e7 -size 1236627 +oid sha256:d1ca9ed65c3a9e02513f94e38e1133aa0a591578b3c036e774d77df29bdd22ab +size 1396990 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png b/images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png index 31bcc3291acc660f8d4ce06809ac7dcefc168b0d..f3517aa3224321cf159202f0df48cbd0fd5c15db 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b6e2359e97dbb9c0018cd3f8c16d9b604a59755487d94bc530891e1b7dc6201 -size 1398048 +oid sha256:ff50aae20ce37a304424417887db7a31894708e4fed5283b856a8c36e3b807fb +size 1447078 diff --git 
a/images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png b/images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png index bce443a7bfc0a556e05bff92f61b84fd18b6f9e9..ccb808329c60a29719c61b88d168fa7d5f9c879f 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:932570000c5a55a2a631a714bc75bb9ae25677efd4217c6c46f7ba16d6b2f944 -size 1796399 +oid sha256:ca39a4c73734e75194ae3d0c2a0ba0a3349bf24d5f1324ae2c5a9f709afbb8e8 +size 1523859 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png b/images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png index 595562b3cb2cb25a0e365c37444651252030bbe4..75a4b5416e4b05668f4a9335e295247b19791410 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c874288ecdddaf573f788b8ad23c506db569354bd52ba9d01c19e5d8c65cddb -size 1729041 +oid sha256:0e7781d2f1c51ed166aab0d29ae19564349250c52f0211ad9b2442f0918eb0b1 +size 1642043 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png b/images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png index a5a53e804a4927d0f5baa850e1a67ac392114eba..8f980183b2059f55bf48e570cb1d909226f16359 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9923591375345ec9bda835784a9f9d25817f4fb6385e02f187874a5c5829559 -size 1545858 +oid sha256:143500f011cdefd891243c221fcb643235a564694c5b52fe5a939a6cebf4be33 +size 1208224 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png b/images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png index 2c55115c2368745e29861fd47d65b3b0a6e13566..90fc723ee1d7a3c048767540403f39899d86ba40 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f34d29ce32ff959cab51715b46c1d04d15b83dff5cd3d00deecc8197a0e48a19 -size 875923 +oid sha256:f1c10cb037a08be59f86cb119cc6d73400e82e84e974f91e205c8f55692e192b +size 758790 diff --git a/images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png b/images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png index 78db9920fbacc57d6ef7211d58556ece147183c7..2f151d7abfb2643cd7c438f8bcf5f4c322987477 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e489c8e67a2fcc193c375aee594579676196c297cb714f9a0a611e7d93be3eea -size 1391786 +oid sha256:fa2e818f49c886c535286e37efb5df5d1fd1292acbd32fe6d437007974f1a754 +size 1399147 diff --git 
a/images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png b/images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png index 6ee0acd8677968fbcf9327c6e2b4e7d105a31f15..5477ec590cbb39770aa5a0025fc0bb0611d66e38 100644 --- a/images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png +++ b/images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1adc1ad1b1a46b9cbf6ca6ccefa2c5d3c998d4c9c1fc3fc533d9a91762ceed26 -size 1291537 +oid sha256:e8d841a18dfc32dd031cbec09d08631b98597a45c1b2f89325c36914a7daccaf +size 1513548 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png index 40c1829ea51cd5aa2eadb00c3da0e9c05836aac0..00a17c235a5b0a65ffb619dd93d35f0a20c5d0a8 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cafffb20be6c9abdfd5e957037a6ad8a20af2e9b9ea95cdde9bef1b3646c3af1 -size 1371920 +oid sha256:2d676cb52a761788820af93a7015733365b8b0cdcfba40293adc08640ec3757e +size 812747 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png index 3b8dc878ad7e268f9a1bf7a41b46f39f6d775f15..85e551837afb923a30c5462165e6e0eb4392c896 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12b18fad6cc0ada489ce376b9c9d64a6d1849e03a7a947e476782a6b91dba79f -size 471174 +oid sha256:3dfedc77f38b9dc9495b17ed5637dce47f036fab6ab22958d35cc89118f0b02d +size 471598 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png index 05be82fb6c7fe296a9a80fc19683edea8f859565..875bd54e2b3447ad3db4102e29ab4f744e8dc93d 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9271f7b37cbadf6908be8f3334bb1fa22117b86d251e980de4560c33c85a9cae -size 435175 +oid sha256:f5e97e95f4a7937eba9c190c8c31bcde436ef643c3e6e983a40dd217c7c73d50 +size 341735 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png index 6c9f9ce88ec01114b2734cdb5eac00f194954416..b25cc361e3c91f06c2f89d06ff70f164ff2047dd 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36d5ec7d395b019b41ffb06d21b07c36ad61a44fef961cf589158b1334e15f37 -size 485996 +oid sha256:aa994bb946da4d2dae8d38ea5089b1d6b08b5aa16d1fc63dba88fd39bb705e46 +size 407202 diff --git 
a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png index c5304052bcb40f8c95685d0d7ea789df4ae5ebd0..74e110213c8e9abe75c69f4233d3b6f253e3a396 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f1f43a45df9f8827032b8ad51812a924f963be77f1e40d1f2346e9d40419ac6 -size 2400227 +oid sha256:c8c46dd11eed1e19ee5b3739a357ede5555d090f3f880c14111201f0e288346a +size 624541 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png index 060adc0018cdff5a1448d178867d0a72fe393716..bfe75e292a2e48e4a23b0c99847a12e5921ac440 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c262a45ee3436b896fbcd35ab5ff3899e1dfc8838b852e2fbb3f56759c00b05 -size 459118 +oid sha256:df2f4da9b075b5c8a90dedce949c5dd05e1c8221b44723ddf538956dd982570e +size 335570 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png index b539131314166d8b55089e3d13b0a0ccbb7039bf..5735e81572bfd6fa8346db5348bd97f6070c6e54 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc4d9bc2979ad9225f3dd0aaec11643bac44ddc1ebd11c379c4cbb613bc72c52 -size 347330 +oid sha256:55050762e324604af3b1e266438402dc5f20438adb27871f57d050269b5e0081 +size 397436 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png index 9d7929e129cfb31304e07e3b0c0b72cca3bbe3e9..e42dd7bda49151ee33a3c691e69b1748577536d3 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8f120aad7a305e80062cc1a0080491fa30a54e06046ba91d4fd3d920dc57f13 -size 524238 +oid sha256:6dc76d03cf39bbe1e3adfcee30aad2bc997a6ea2d41b75b5a35765b94043a5e7 +size 525200 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png index fe9318f86b555465cad6ff25f753570d82597d1f..6c5fbe357847c9009ee78e2cf19988b8579bf4f1 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bd99624a3ff390c48ebba39466a7ffb29ec872e599973956600bd279d3cf3e9 -size 351869 +oid sha256:94d939a202e48d444ac114e26611239c258b7825bb2090aafdddb1d8ee4a1cea +size 386273 diff --git 
a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png index 6ebee6a8f1d2ec31e66f98e4e38a5d81754fb806..92f72b1563579d264b2f599888843a3d3f6c73d0 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c14f2fea5a8948e12d4a2695b940eb6a603104889b633b904f2f543adf8b92d4 -size 1229020 +oid sha256:e0ace73094ad6bc23c4e582638bb8a3b238300f258f26a09c541ca50c569ee7f +size 1280346 diff --git a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png index 98e6be7386e55bd6f46f556b3eafefd59fa16165..118abe786ecf988b8bd45a234424172a15ab163b 100644 --- a/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png +++ b/images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d24281f293b1adddc8c3693b2693457c985ec6c230020ab56d8566e2ca8e6e7 -size 348151 +oid sha256:1d4b1f330c7504eaa6519338aeac432f297975242acaedf2a6eb9801a8de271f +size 337719 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png index 2ab702864f47d9566b905218e962ead844ce5ef6..7291899d19c3d5762aaf57be3063cc54a6522418 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8233839f33d587dcc261afb92f688142a8d3611324687317e9afc7f2d38aaed6 -size 543703 +oid sha256:cf16391d145234a9fdde1bfa3b36e6fc5bd8044bbc3502dd9f129d9b075aaea7 +size 419762 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png index a268d5582b409e4d608373b00948d2778c617119..22d331021a3cb9a2930e7b9c369adc723020345f 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:515151212b6ce14dfe3834c043bed0a57dd87744dff17ac3add0a9d0df160aca -size 2317776 +oid sha256:ae08ed2afa9ca0dc23e8ac1bead1484980a3c127a34c4e502ac7b299fb7af121 +size 2337232 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png index 0d518d20d5e6e35630da4d1faf72ada747ac372c..d82900d6f4d823c26ebaa43efaec8c760903f381 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce3d43ceb1b98839f7390a84b704d4e32570c5a1dd0e4638114455644136920c -size 1171185 +oid sha256:ccb06e3aef0a03a0ee40ae6de74d08e7870fc8d564dd0335955a7be4a54f1862 +size 733390 diff --git 
a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png index 5ddcfab6a15e8a0a9fc9cfe628fd432b8b5dd89e..14ff2edea3a36170130733e58ebe3537eae8211c 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7db03c63073b7ec7b5e1d4a63643a7586552b582505c339a88e84c5b38c0f8c5 -size 767958 +oid sha256:9fc3c901b43ed99819622866f0b9d9bc247b099abd13239e061d8243bf281851 +size 842408 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png index 7747664948c802efab5aec56256143fba16e52e2..5c8cb89e3ba626dc9dbcf3ded82f507f914c2a81 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39d1ff6fb780334747ccc99d455c75270b41621363317daf9a2c7359feb115a7 -size 1063680 +oid sha256:80d17265a108ad5e256e961d8f843fd6902a0319a8e587c64c875c25fd421db1 +size 780516 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png index 069e7ec65919070a3a9cb0bb5b30adc95a282f13..862bb60f900c13ecb5eb8bdd98d3eeaf7f4b80ae 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6e1ee49f3a770139c877cd2b80e28d4db70b60a16cbd51086792610383f4716 -size 595561 +oid sha256:5662988da6da82881692b9cc3bec9cb7904bb6bab9336d85515b5cc03a8e853d +size 776882 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png index 181894b75c0dabc3e1e6e0c015b0a38be099ed68..3fae45325d07e861eb3677e3733dc5a52f673759 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f3638085fd914fc7565b157e8818b1ee4130b221926ce7a155ccff60d8adaa2 -size 1086409 +oid sha256:4f4e907944a1eadf6abc7be99303097efc5f40d77566b38de0b31f5a2de033cd +size 1087446 diff --git a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png index a192bd3354962e9e9edefede87641ad1f0bf13f1..e697e5d7e08cd8d03d0425cae735fe215d944a0b 100644 --- a/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png +++ b/images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55921d8397de482af7b43ebddf8b004105a294af4e3b65a37c288481c0538220 -size 1296816 +oid sha256:f9179679eb0119602b242c31171b73f9ceb3f769cddb6cc0b4e26303a610a3fb +size 1211376 diff --git 
a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png index 610dbc550a03b65b9516b6caf01061543a7dc2f9..7c606355b6ff35e7bd62c8e3de5c46d4464e7bb6 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:073262a3792c86e3d32e761aaf1caaa02b17c08ece190ab4e0346e0ff6c3df97 -size 1857190 +oid sha256:2962ca0ce2a7c557ff661cb20f512bd2b56a250fef37004b02ba8bf6edac53be +size 1013534 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png index 0ea6336b44091037eddf2985ec4cfec22c226885..88f1d346925e220fb5894ac3253ee73de77f2553 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f743281ef418200adabf8b433859246682f5b02eb417d57139f295f24fef3b9e -size 1952961 +oid sha256:980a10f3540d9d8885a3e953d39c792772b932ef7bc1346a93c19373f59d232b +size 2020933 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png index 70a6e335101265306dd0b9b03e2978e39b0c8b1c..dd3541a156118e5b09707d9dcd5d1f8b8cf94ca7 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9313d6412da54f94a8975f054ce298ce9165fccd6ee5139600b89b1158976482 -size 1174853 +oid sha256:0e41b130c99141295fb07e10785b42b9ef92a49d6cf104a7406736a0c70fc2aa +size 1174547 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png index b47d908f5cd19c0787cd74b1e5c2929876b7e8e6..cc22ae5c80e13f55f0341ca5752e0521e221e4f0 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b17e0f3ab81f0880ef2f443171b3c230fa23bdbf978052a1ee8bd721e8d5706 -size 1697422 +oid sha256:6b1ded9c97058b056860d96296f51917e57a4e09e970726cb59a9e43092c4d91 +size 1070798 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png index f80dd1c2eb41010ff2ed4ac516541b86db7820a1..861132cdd53d2e1aa1f6c73bf3c1e20326d59e35 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff53f8cbd690d5de52cc73bddbd2df369351f86880ae376446967a58556b56d6 -size 1757018 +oid sha256:a4c62a849ca1ac09a656e1d790e29d88941ff8e8c1ef89befd6803ae0efdce58 +size 1685225 diff --git 
a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png index 21436ddd176d30ec37aa2ea8fd44496036931bc6..6951ea6c6ed231379f4461323e77b7eb2fddb62a 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee9d40c468af2e6b4cbfa0abf9a806f2a607bdeb91b6b3509c6f5cc24354a900 -size 903724 +oid sha256:a2ae8086e4474f9518631e757f6fddb9f12f5a62f590af007663931d01b0de0c +size 665458 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png index 29de824de1353066d5f6991c5d593ae4dfea6fe2..8106be00d0e60829baf8872cc83e6e3a77f22ab1 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69cd4d50440db7a9d8e478a7e2bebde6180725d4745115d2b849bf0f90e94e7c -size 1696053 +oid sha256:002a5a1cc99897b5667a48b4a3b4118eaf1ff943c0c208b5d6c1427998cdffd5 +size 920228 diff --git a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png index 038e0e6255a1a5076bf13faf571ce411e2addf03..7b1aa3d4fa46086b194b542b220b5ed8de969a1d 100644 --- a/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png +++ b/images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd07002ae1e959d10d02533901a7cbb68c3cc4dd7bee872a0947fa3bdcb0b986 -size 825823 +oid sha256:8b760dcda1c1b03d87c335fc290487b295a09536506b3a1a11aa1405a4c4e33d +size 1376661 diff --git a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png index c351c5bd89fa710a69c1c1d0746fd6cc67f574d3..19588cf979b5112786340621a447ea369342b8b9 100644 --- a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png +++ b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75e9a3fcdc79478a3ce2e829e6e8927f2733a56b0e5ae99ce142a47d87ce91ac -size 968042 +oid sha256:562e3653a42804ce06dec00afe91b1e7d4d4daa87b27c89b2fdc128239f27ba1 +size 1075361 diff --git a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png index fa9d937bad8794ec09732fc5d0d9cb52239f2849..58d7775b4079390defa0d6387f9bb1b6d41383c5 100644 --- a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png +++ b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:931969bb77fe3cd6e9f027700359724dd5e8453fc51e758d8862e44642230254 -size 1097057 +oid sha256:94f6c2ab958b30a445c6a9e4a97824ae2104fcbf32d558b8c55679b857e9b80e +size 1149255 diff --git 
a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png index 13546fa5fa743e756467363afac6f6d735e801c7..d6e7a5000ecc0b78327aa0294e15ad5c035e1442 100644 --- a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png +++ b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18998ceb3ab261419da88beadef7e4930c52eac84fa338bfa056b21de2a42508 -size 984856 +oid sha256:aa4cee04495f1927a090e5a2084750eef3bf6a5ba24fd429f3779533e68491fc +size 1370391 diff --git a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png index 37f46eb05a472e8cc884b78a5436950f36054f68..e9d6095da67e3fa736fd3f43cc19dace75ae8d1a 100644 --- a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png +++ b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4da9dfb74bad46f599aaf9932e7869e3bb87f2f9bf3f34fa06974bb5f28ee32 -size 1443360 +oid sha256:85ce09478aeb6305add9693639328b69991b243f31e02007f3db07cf06247868 +size 1063719 diff --git a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png index d1cbec7c6eb4c484f49108edf6b71f9801b1bff6..d41e8d5c792781106e379b4c065e83c4c478010a 100644 --- a/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png +++ b/images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f139768dd5b4f358db4771b02af1a955022454816f6988409306d30b9d3a3fab -size 1161539 +oid sha256:2370af3332bd1a15e53a0ddd28feac0bdd9f9cdb8df4ffcb64664073ad651d17 +size 1152239 diff --git a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png index 83913db9bcd08fab78b01a757877bf15c964cbcf..413084e35ca8f852aca8a4c3b3312928404d4a30 100644 --- a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png +++ b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be33cc9975c751050d445be4d3f6d88113c3de5fc52528fee11a782542047761 -size 660605 +oid sha256:7f054374fb0981558a6ba87dd8d9373c2cf5788b405548b92f583de6ac19de3d +size 454525 diff --git a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png index 367ea27a15afe9a7837e52c3e13bb36b5bc0fff8..46e301371ba3a9bbbb37c85e46115bd8dcebaf93 100644 --- a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png +++ b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9ea5824dfaba3e49505b84a9f5a03b2b10b558f4fbc486bb5cb37a55b18cf62 -size 711051 +oid sha256:6014093e3c884a6a564cdf6b886ca6964965b99d181ad9686ea5c9576b36c5e3 +size 700951 diff --git 
a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png index b827151c5525a836a0fcf9f5709f716abb2f5e34..09ec9188bbe253285266cec423cf89f76cdd939b 100644 --- a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png +++ b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff01cb4772305e8709ec8cf31cb22c39b8c30dd4bb6957de9d38240658f0dc3f -size 2956004 +oid sha256:15245e4d7194a995f81543400f85845a52200772002e5530d7ccc183abe816de +size 2354793 diff --git a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png index c65817e43490ba5d3cf7c88d27239dba7f37f0d8..e5468b3e8f33664f8faa6f15df926bc021b3c3a7 100644 --- a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png +++ b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06e25c505c0d061e82c29d747f0ae63234fbbb17fc3f38600fb92cddc390503e -size 1855956 +oid sha256:2319749db254774c5e65cc948ca5c3ce9dae75794fa80f1ba7612ce414a0cb36 +size 1370253 diff --git a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png index c0764b1e40976a5e0f0d35133ba2f163f448eabb..7030bae8a36e50218f3504f0fd42c9c6198be66c 100644 --- a/images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png +++ b/images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42cf4c1b651ca538862ef33f266d428bec99e765bab6ababda531b4f2a3e5700 -size 2318978 +oid sha256:3507cf5c72e96c01203340c7191353c7a70d85b8f5ffadd6bc8b0a130732c56e +size 1280081 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png index 12de564abb342024f4e772ff4747da6c90e25a85..6a76db581206ddc5ede2e8e99a06e4a6872cbeff 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e65af45b91989963962d1c0fa0eeaac37164d458630867b6161784ca0451809 -size 1559686 +oid sha256:4d43ae6f9441c6262085a62a396638ef08908dd70a07ebfe9be3d588421003ab +size 1693202 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png index 8faa77ebfd66e0ad7046b1ac9c58806776f216cb..047f007c3c377962f6f955702a920c5acbfd3196 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f34d842ebddd52eea2a0a05843605f15bf888b73280358ed991026c769f0be0 -size 763401 +oid sha256:d8e38813b3f88d720b1fa6b1d58d692c27b799ba2f3dc071457b1cc73f1ccdf0 +size 841937 diff --git 
a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png index 5a6774088ecb33c89a94e17665a1e32e2fcbeb94..1fadc2d1c03556c2ad29a048eebbacfe2d5d1ab5 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:751e4afc34834cbef6a22a24266a4d3c35080fe742e01d69fd25437cb2088383 -size 1614278 +oid sha256:a015c13628762f963f6984620eabd0a69656c8c8b20fdc44782c9d09772f2982 +size 1054132 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png index a4092f202096e81936ec8d70220874f3b23e2722..85b85474b0b7b80b45b968c4e33d70e6d07594c1 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24bc5c2744633188f3fc0b4522ebbfa82df1ce8c412b54ddaeec503c0b218070 -size 1604153 +oid sha256:8cb7c5c791feb847ca58b1d3f912bc6d1388e955b841d7e532838c2614e057c8 +size 2027712 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png index 213ab45371f6948c0ea7bc16cd4511113d832bb5..1eedea79ae6825b8f0ad1baf6226354ce88f4f7e 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:084cf2be96d2dde0ca52c9c804431e83fb1c0d490ecdca9c806d92741db28bfa -size 763249 +oid sha256:1f1f442d3d2ae28f528faeb789d7a70d5321381e0b0101ad3139ea5b65fcae8c +size 1174203 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png index bc0e8a8930dc236a359b7cfcd91729829b06cd51..a72c3782f9bde2b1c88d5ec691251976524d1172 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2cfa2d94a448b3a3fc219f770b0f6821e25e83d5a6ec81d83a62d4ec9d0e6b85 -size 1244351 +oid sha256:cb4784327dd0c183bb8f3a9dd9c68a18f714e91c410a99afda148e5e49cdd350 +size 1591248 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png index 12de564abb342024f4e772ff4747da6c90e25a85..292e3e30e29538610e938bb5adf43b8f129102e2 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e65af45b91989963962d1c0fa0eeaac37164d458630867b6161784ca0451809 -size 1559686 +oid sha256:1e2cff7ede290756bdc398f724b20e87c1129f10aba1eb5a830e95ec77dc9e56 +size 1774581 diff --git 
a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png index 90652974d3bb707afdfeceeff5610bc6fe78560c..5e403eae05e6067af92fa49c5974f1fa8e82b284 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6519b86ba5e764854adb464754228969b8545e982c4278201526dd0e754c3b1e -size 1566058 +oid sha256:f8db9d65afbed4812aad603fc17cc14ec78b3ea24f841e4d8a8123edaa4971b9 +size 1727893 diff --git a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png index c7d92c48d5a32bee0d99fc09504d9990cac4b85e..d860be935431e22b959319ac853cea17e18c6176 100644 --- a/images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png +++ b/images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a00731250982604a476d484e837175ccd8b42271f3df8fae33182067d74dd748 -size 1558896 +oid sha256:476f7f7f72dc2d14f2fc4ecec2f61a7480a89f40d2e0e0bfc32aaca8e1777e6f +size 1614297 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png index 1cbc5aa9dc8d3b1edc7b14f059f1cf38b0088eaa..e257b4e947faba0c38944a5426f0ac026ee0d1a2 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48958ad307b37d54bf09e2a872d49ded5d873fe88316ab2ee538136736a05767 -size 470046 +oid sha256:7b49fc9a3bafca18b06b204a424324c411b50294bb131839b1d5cd0cb852fa21 +size 469352 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png index 599eb163be1ddeec3d21d635dbad84c4c68791bc..1213fd1b265297eebc6e267230ff2fab68672164 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9f6d10d292b6d4e79bd550b66fde656f5b7b8b46ae92a119455eaee595cb9cf -size 344342 +oid sha256:1ba5f49286329710bd951f030ad0d4d9b15dc1c3c231d5f2b86d8848ed3198f0 +size 110598 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png index 6c3843cd8809f4daa91f054ee48679873d89f73e..fee9d8a703c82b6573b3e3a32dfb88ca974c3972 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41aa10f3026a3e6d78167d259e4cc5f105a8633483ff3e959b97f41a8cbdeea8 -size 500884 +oid sha256:41b630037c8abc53648e4ec19a51a9359f6ad49dd72ab6345901e9090cb6126d +size 295829 diff --git 
a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png index 832c4441afd5918dde14a4b3a4b5200ecf159d64..5517f26152dd99b12e5602bdc15c165b8dd2b6dd 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:577e32cc03821abf9e8670c003a9c8988ac7a5f385b3d771f48b7011405983de -size 486019 +oid sha256:9e3e9541ac648763fb8df9d9445610589ffe60bb1950f902a1ccc09fccf5d1b3 +size 401759 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png index cd5d9f3059b136005c5e404fec77d47702e3f08d..f9160bc2bcc59cee6867578f13114c1602560743 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b00f3b76436ad727c46aa2a9d8e7b61fa37bd3b06e789ea6b114fbe8b6ff49b -size 484009 +oid sha256:d59ec501d3febcdc8fc7340fcdfb39f257b263af3840a2ad7ffc7e5f9ed3bd41 +size 219531 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png index ec7cc52744983f07d4651aa2355c5edf6edcf3ee..e0b58ee0ac7562a2f286c288005bc348b762c63d 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e85a5b468b0f404017b088989e958ec50db7baf17c82e2f6ae3ae2eb2d1e4ff -size 494061 +oid sha256:77a0ce4fe0d00623d4dfd534a4cb3768562f9412a3af8f21855510db17096838 +size 389959 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png index 2990b606f4a451210189cd23c41d67023ee8a21a..448494512ee651415fdddce3a3fa5755dd7499fc 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf629147f5fbaee52c5dc513202a18c6cdf96677cb888070a4d6c9764657b87b -size 1055153 +oid sha256:0dcfbcdb92753c1669109f99c99eaf75e512aa781c3465d6fdc753dc300b4e3b +size 1509256 diff --git a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png index 51c9679a3284325091228652931e03beb8e5ed30..ff6c3058ef607453086b278ee128726eca4f45b7 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:964b29a98f8ae98df928ff62b15aafae58f3764da86c65b8d295f1525b65c120 -size 1296417 +oid sha256:7091705ec5b925357c560137c0562019691accb6f9a55a1dc24c43122c84b367 +size 1710987 diff --git 
a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png index 6f2300bd34c51a295ea458654f36583f4c924dd9..4ae473801db3910a07506d8bd31aefa2bb28e072 100644 --- a/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png +++ b/images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df23c52f9d9a9855f5e2cebcc9c17ed6f7280f23a1d4d9b94d43132a6baf0d8a -size 505913 +oid sha256:661bd4d7f2fd708cda55ae24cd35b31f0d0f44c4883950e1170c5722f04636f0 +size 498267 diff --git a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png index 558c17c983bf1f42bfd41e1ec88cc103727e18a5..641468b2885d78c48a1105fac55a9b70a5a7d987 100644 --- a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png +++ b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0edadea65174fcf45efb53d144909819e240cf79109fed213466de69e481ddf7 -size 885312 +oid sha256:8523f4edc710a4ca703332e06ad7ae6f462d08c18b1787dab77c7c0f0e8d9205 +size 1244423 diff --git a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png index c349bcc2afa67f89fba96a9ca405fc27935ba97a..5c78c2d6f242beda67d87d956e12dfe7303c4786 100644 --- a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png +++ b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0382c0cef73c5567fdb77f975957e834848e096a7ff8ece99030deee6c2200fa -size 1522462 +oid sha256:d890aaed4e1dcb51c09b39f6fd59a62ad2067c6932d9447ac362751d5d0cae94 +size 1685505 diff --git a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png index 69c65c08700ca31bb53d3ccb23b95fc8b4961fe2..26eab7038bc7dd56a2b35b01ad75753741df9d60 100644 --- a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png +++ b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:547160a17aaa9d240db4628b79f2ebb7c2adf6cfb25c751e5606b0d1e8a92c97 -size 1340278 +oid sha256:d0824fed21e91641e760d68ad8a54dc0a66e95a1d03f3c17b79dbf1bafd15208 +size 1226807 diff --git a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png index 92f1bbf4635fe335a372cc05ba97eafa99d3742c..8828d36fa8b0580c7365711d92be08ee6c095c8d 100644 --- a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png +++ b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:113a1df98d6b7eae4ade8fb017b1c2e0ed2037e1ca667e4b024f6d14e19de569 -size 849136 +oid sha256:ad901bc92fdd31731d95d71fb407ef7cd842a492908911804548ec34da7e7cb5 +size 1328775 diff --git 
a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png index 9a10769f897342bce23417c89e78ccb416006c74..84d567923f3c99db58f4fa3e411e3fd9cdd1bb7a 100644 --- a/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png +++ b/images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5509514b3859587a76abda9965e19b29566c91feaabd060d7b7638aeaf144083 -size 1195716 +oid sha256:2fc0b65a007da64b06080d59929d23d0df9bbdddbc6f4ffd7aff7eace22853e3 +size 1262698 diff --git a/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png b/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png index 38f20036ef2974142a3c866d136b024aecf97b7f..857f620e7da33b06ac59059182ca6d3e27228f15 100644 --- a/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png +++ b/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4260825a372f22d8a53f359e508c16e5a2460b3eb866c5a5743cfa6b0ae49868 -size 1255511 +oid sha256:8e2908eda7ad6cca2dcb7f875586ea56e6b053c9475f27f42fa8ba63f9abdeb9 +size 592080 diff --git a/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png b/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png index 4d4f7d4a95856e7ca8ce584b1fda3e26bf8b3021..a76c71a2f54a24625f3abfa3405d2b6a363f1d70 100644 --- a/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png +++ b/images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d379143ff8cbea6fb1b5f34535a3d25c3ca0a9f79136a73d511105ad2ec49df4 -size 987514 +oid sha256:c3cc17ec754e5ef8f75360de44cb40526f6dcb9f893e5753c847d1567ef34024 +size 1008689 diff --git a/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png b/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png index f97d830dc4715b98b4eeb7dd0103d066a462fde1..9a22a88bca0c8f7d1170ea522fdf6ffd6f832806 100644 --- a/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png +++ b/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18ffa2ff9dae1bc2989c8c6c11dbd1a8de8a03eaccc70fb9a76f191c2b86d8d4 -size 1298976 +oid sha256:142100d06300107533b4d1885531a5b23d96c1f8ecc8455709288871faa77fc3 +size 684222 diff --git a/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png b/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png index 60178f1447927fd23beedb0d7482a6f9e2fad1de..fc9b0e04e02cbf7d2a93cd69f17dfd4c8c8d4a10 100644 --- a/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png +++ b/images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66fc3df2fd53c2fec7e2f80cf9ee20984316fb50e4b7788fdee401e1af2ca49d -size 1411669 +oid sha256:4a05a62c9c77ca9b7691ef9a42ab1f64f7468220f04d0678b6130b17db0fcde6 +size 1209188 diff --git 
a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..33726a5423e7be4b8d06d708e83e8533bd0a0c29 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:be77e289bc983b4d6068be698907b11029441c751ae0156fbc1dfa03163003a9 +size 2725620 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..4ece72da77683084ce7b969a1fe3e42295753e4c 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:5d3d093023ce1475c305063a79d256612f0c3113bc7fa38cbc5b542de5338f2b +size 1598201 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png index e05051458d3fc828c2623444d93b4e6b0c039bb7..1a9ceeab32e7025ad3475f32317825e41175e228 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f495b241f72a24a3ac5febc3e42882998675e6db7f08c5281e04388a13eed6e1 -size 2126036 +oid sha256:be777113eaad7f292c087d65e672680acec8e315d03641bd7cd8030b29934190 +size 2117013 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..2b0f1ac6e90c44cf47c8d75af531425ae71f22d2 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:29ecf2c05b031c580d63d3b988c2fa7cccbbc62977d5020e705044c98fd8f819 +size 2750104 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..e7c31885edd070070f228149e73706c641790499 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:06afef7458a23ff15a346b38b9673463fc5244eec51331c13568d56908a2a00c +size 1642773 diff --git 
a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..5ce891135d23505d7f4cd5b53ccdaa4595b294f0 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:0d6cf347a505f8902cf51c3974ae51314bea56839d7951e20c1d8601ce0d6151 +size 1544237 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..0394a199984f7dfc660c9993b2f986a1ee0673af 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:e0b828d5c4228d0b493e70cc79c582cdec7997a6230df25172a2f085aa372378 +size 1568526 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png index 69e376ed0f131825a67848ee3f0d64efab7fdf24..e42a39285272c1ade5eda8235d391015a2f33d40 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6420e8d255621edfeb27470912afeb0d5939bfd58abe01880ced97399070131 -size 1706397 +oid sha256:41dca69bb8efaffd4b3c0c227a53425fe2565b3b443e87fa80d1dce6257b9295 +size 2749263 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png index e59be432f29ac96c4991a127e42786c94b405aaf..33fb888e9d8489ea10b7c66f8b5be437d2c44651 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfec328d30ade69523f687c9c92f595d607a6c46f5883908ba36128cac0112c0 -size 702987 +oid sha256:ee2d86afa9916278233173f92774e3eba097afc99208ffd441e3fcf95cde3bda +size 541924 diff --git a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png index 29979dcf45b367b219d95d903e55ad056ce6a64f..687714da1560ca32b3af1d2ba8d82b7b5a0f79ca 100644 --- a/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png +++ b/images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32dea9e465ddafe93ac499153182a4564d5da394580402e96ce00c3e2f2aec97 -size 1948034 +oid sha256:d25dffa889ce26316701d97042a34dde0e8bcf6026069b91b9ea8aff67f6daa4 +size 1636290 diff --git 
a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png index 7126f4bba49aff2c97a08ad1dd0c392cdc7dab43..1b4d6653e729e8b19b043f2dcb0ea05cff84d3b9 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a88d49cc087373dc3b1f473e67c96848c220809f50d812097af0a0b8080ceafe -size 1149487 +oid sha256:739c66cfc02e3e3247f0069014fa202ad8c9a1d034d6c14c0ac38c430c7b0cfc +size 1258510 diff --git a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png index a3988bf19b180ebddee7eb0486ec034285f9060f..e13316201bb7b22c222c52c786f27f63815e1522 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f991cab1c4f448f9ec9732218b4d249105f033bb44118e077b97a4ab6e031230 -size 886877 +oid sha256:0284c7f64a932fcfb00b24844b7c8481a881216622f913c6be172ac2a07689d9 +size 567556 diff --git a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png index f5c4dfb130e970ea53f73acd98af3390f0eb4e82..55bcd59121dbc40aeeedb9b787ba35d54e89f24b 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed2e5e5d52628da9cc933a138af1e28304f84e0a507d04cef2e827d9c155739a -size 684773 +oid sha256:d0281c69b18241c1da42d29f3b70ed5e8a64c22bb92ff5449d850eea14ab318e +size 339778 diff --git a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png index 3833a24a0957821ddad10deb0ecb87b5d1fd7ae6..c5b2449dafd4182ad695f7c9ce41507eca6d78fd 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0ef46dabf551ebc14111c6e01d6563872aca7a9cb46278a8c3b0fdd0a52569c -size 661002 +oid sha256:9289968ecb39ddf947850aef7c1897121e514df3abeedbfd6018f4e4ac52da72 +size 568686 diff --git a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png index c17c52400e7a460145ebfe4646a3d143ab188d6b..18691f439ea1d13107ed7e3ccf2d95d673a9102c 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e001e6d250ac7a8889d6ef6ecb5422915a03a1056ccf99e79207732cb9c8b7bc -size 685369 +oid sha256:93c6cbec4a97be1a4e832ca3441b64545f83ff0aff02fc0b74750acf87e256a9 +size 1035765 diff --git 
a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png index b16810ff8d1c38a38640e9990e04e434fe2897a3..402b13b50da70b2dd59657e7d671ab00e1357f0a 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcd09ba0c4a0648efa2dee7e6e58877e9b881cea4d0415eb1e16444b74eafe6b -size 709865 +oid sha256:8a3049f05c9b755dfdc645b251f8832aeca35644b37d129168986bfc40bffb7d +size 566866 diff --git a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png index 77292c5519deb792d05703ac81dd30a21d93bd31..80cd5608efe5645e0bf44666a72215ac5ba19ec1 100644 --- a/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png +++ b/images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b682510ce4cf40cf2da1a05d8ef3c8e742a0c82f326396d0a0c727b750d3d167 -size 760245 +oid sha256:a357e302e5c482641f413f8fd38635938085cb6389b38d7eb00308bee916a703 +size 1005443 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png index 34b51dd2ee732e0c79be4694b7545dd2bf3049ee..8f3200f515216a75d336a075a580df8cca8f2114 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9b42c1b9beb28f9bfcf19e706ce8329df4bf5bb35495e1ec6db83109c42ee55 -size 756721 +oid sha256:0e98442ec8769d2816034ac4a9af10aeb676c4f99e0e1059c685447a1a08f1b3 +size 782078 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png index 33af242d387b819747d816ff947009093cdf2598..8bbdf3a1013249b76a99c9ebfdc72d19791a2e76 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afac3443a71025d10da8fc84ce2bec88fe9ae62363099f26963d2177b516f609 -size 504491 +oid sha256:f300c3cd636cd45d053e0e591f3e41a84160a92d87257914c5150d98e07807ef +size 645864 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png index 5e3338d3c480d37ef498af6bfde0026d3c1818b2..32b7baaccf3abeea7c201c716dd1a1e8cc683af8 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba40cf3cd7b33e08fae3fd83c936d9db90dcce02cfc992cf0520b2b11f719f17 -size 544033 +oid sha256:588a06cfdbe59045c2a0bea98bee95643452470352c0ee9ee0f615bb6487db9e +size 634300 diff --git 
a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png index 9fecc0cd7db3542fb6b6af153b23a7fd8a3921b8..0c86697dd6f966b307af6974fe847372336a3bab 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fe28f792e9d06e753023cc7d38a4275c6edabdd9eb582a91993fc360df216d7 -size 818179 +oid sha256:d49163fbbec19533e0ee149933117ec92831b2169a7ba71dcec2324cbcbdea18 +size 909244 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png index 86a75edb7040fd03386eb42f1ffd75f39e92755a..710378acfb0b5975f75e07653de05201b8a2f75f 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ac3e41fa41788d72d0f7d3c82cc43ca30608df903845c1a2ae2d55fc864c512 -size 874113 +oid sha256:1f7f566f5282ccb453ffcb4bfc689a1effef0550c7106ad8c94e176625d86616 +size 1116820 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png index deaae737b65ee4dbb81476af0b5a8d8c61710e38..89c97d4cf38d3e793497097625f0604c8d42fc70 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcb4dee798d41f5c3fb2c2185c8d288cedf7daf97e7f812cfde1f6454972fc3f -size 865815 +oid sha256:d9024d33860de9523321270a6c4ff9af142b562f030e30dd057179d2391db219 +size 756212 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png index a1c6d15f85418fa5860d2c684a6f6317206079bf..f15857296e85a3ac3964367aa86755d3ac1c7e81 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:277a587836d3a35eb509b821dd7b524da1cc18b48482df98c5d4daa9fa672b06 -size 710884 +oid sha256:a79037efe53d8e0041b5facae9e0ed40dcf5732424cd95ae3afadd51506b181e +size 818827 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png index 0aba9b2278385366e917fd860f61f8590581c16a..83e77f06db380466be2f4c1730763d4b2926a615 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6aefebdeda069c7f59f3aff710bfa7d9e7d7391acf92ead321329f612892ac92 -size 1082116 +oid sha256:9e2e4c8efb90991a31a8c8038b8c951904284d83072b9c4c17d0d7500c2e434e +size 1611692 diff --git 
a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png index 3b875989b7ea6c18dab963564213718acf627812..6333a954be325251c32c077e2f658cb2a4049a4c 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7efa79fe263038f6d24e5aebdc378a91de7a41c863d5c5230f352b2d36b750b4 -size 641122 +oid sha256:57975b5d9cec3c4f5839f20fb7bf1d8199b6f2f58f77e986c80fc5ccad0e8f3b +size 1063460 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png index 7c2d6a03b81b0795b5c650020feeb0539611aba1..bf2ef4c9fa903325c2ee655eda7315e7e288afd7 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d58a8508850a062ab47b1b76f97ae006830c588c716935509714af5ae72ed64 -size 1059767 +oid sha256:96f6d69d80273ab5bf8274f7672a170c0672f7c706972b81a8ec1b26e006e121 +size 1016325 diff --git a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png index ebf47ec66ee275a404011c8ef5f84ba96a55042e..8e8a04d2f32754f84c18a539eee7892b4582a818 100644 --- a/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png +++ b/images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:482363b99ecfa4440f68592da316fd4712d8b48c4547e74838e2dfdb9364a197 -size 535543 +oid sha256:b4ec86d01b63b96d830e5c835e9c4425904bd8924fa717508cc62bcedabee708 +size 698256 diff --git a/images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png b/images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png index db9051fc3595f0197e6a6ea392b0fcd81a0fb4f5..3b59cc0cab79e28ea3082c12187d3a709feba4b7 100644 --- a/images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png +++ b/images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c5cb80cb94c43e0db6cf67c9f60edb1bafb239f32e1d84be8796ce9972286e2 -size 532555 +oid sha256:50bc78543c54380edbb99fd0792bf70f06cbbf82080f0f5ff220f173c6c1e708 +size 387306 diff --git a/images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png b/images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png index c1fb8a179158a8996fcbd6d2017d3d0ee6de0744..c50a2c492f4d562268a4763b2a2211c8ef0b1011 100644 --- a/images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png +++ b/images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e46c9d8fbecee46f171a39a624962f78b9550890188cd9d750109ae5bc98e052 -size 557598 +oid sha256:d66075f224a7b96a312a263ba8db9bff480b4e4c68e5922078720edb7ffa74f4 +size 1097809 diff --git 
a/images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png b/images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png index 9b6d69b6b049fcf1df21499ca2e5f62aca01464a..d0779f521253253d3685b30b542b6907decc527e 100644 --- a/images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png +++ b/images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3c8c7b3f742a410455abe7f0d710821746deb9810c14585a99ba08b73f0c097 -size 554833 +oid sha256:d2f833782b75cdc66c14ecc2e737d5e95266a6b3ce38f4dadecd2206c62b14c7 +size 583047 diff --git a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png index 590592b7cbc863f7eac4ae8157b7f127e87f6a3a..d79b767c259c820daf38e2bcdb9b474f02356a30 100644 --- a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png +++ b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fe8af4e2a280d8a2664c53649082182bf750866bf4e018f96117b6687217356 -size 2054440 +oid sha256:9f2a14e2b40c780b877484885c0ea56c3d116af5936dd8873007889a3a2ea743 +size 2483047 diff --git a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png index 2a5c5f9c7279fba25289f549aceab3cad8288f63..92042b3ff97dc0920ab0bc6846394208b35a337a 100644 --- a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png +++ b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d7d5c7fc8f60ad8410c2db2ecb16a0132fbfb0b2d2a78144d65250a779a7d4 -size 1485845 +oid sha256:ba13989129127254046ca4f27e3336694e8a335124ceab55c940287b885aaf19 +size 1924917 diff --git a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png index e60127583faa1a6371ca3d9ee08490bf6d3bc391..d915cd149d1ed92a1ef05b8156d4fe30cd7096fd 100644 --- a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png +++ b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e70f5b3f7abd6311562f266bf3f9badd296ebb811f278f8a115ff322da115701 -size 2238658 +oid sha256:aa9bb01686954fc8867781930424f0dac4bee5fddcababa211de4b3d04eb7ae5 +size 1897689 diff --git a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png index e59589ec5cd191cd87ce3f1acef7ca81dd4a2b4e..4ff730fb7463771101c3955af906ca596a4eace0 100644 --- a/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png +++ b/images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8205d6870ea897040f216b1d0805ba8120c6a26e647b7ba8a43f690a6b86429e -size 2451085 +oid sha256:89e2dc196b51be9263e10012db1cb2ffa97711131b58ac55f21edebd217ca70e +size 2399937 diff --git 
a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png index 811c7f17d8c7f2cfd5b55ab355035af7621bc5a1..6798eb3dede25e81dff144e8b06207924e5dc326 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7945cafb30ed55cacadf88b2e4e103a92042c14d1c0e1edfc94bb7d2668ea824 -size 2024730 +oid sha256:d935605c474b1ec00c42ef29db51e32f51cfed06a2e1331f3f84ee8eaa146fd3 +size 1573291 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png index 14820acabe1d75f8b5dc7a41fc623aba1ee98358..84df06379b99d8305a79bbb931e62a20a86768a4 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c24f837ab2577e934f1c0cdfa604a772278b44240a328797de79c76f0d2c0a4 -size 1425760 +oid sha256:0865fe983e32d23d725473480848582461a24f42152fe7f8777caa933a1deda0 +size 1802335 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png index 9a748686070442ffce46239a6a7ab1be476da4cd..7ee02cec3b5d138424f023973108cf74f035b656 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c14fd6a802e399d3c112c8eee85f5384ddbebdc682f9ea70f81e2e34e6a85cf -size 707930 +oid sha256:2776cc8529629a4e0b8cb7098110b6566a5397c3feebde4d88ae9280342906c9 +size 852246 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png index 45a9323494b5e6d409bb97e44be4eba93a42abc9..1d456652c46c4cf677bfef8e4b67bb1a85b6385b 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d494682b60528ac4f2fded8ba49a9cbf1d97bbd312fda19875f189323ae03759 -size 732825 +oid sha256:08f3f1bd0aa1bae4e738974c01849d2b20e4d5ff8b177494575828ee3b4a6c0c +size 1223823 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png index e02ce131de0d6aa4a237cebf264ed78c0a9546f1..390535fcaa967258e763ef3230a91d961f4f710d 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28939253309604e030321a93ab81018e0badb0f1e769c0dbfc22e2efcb44219a -size 1402308 +oid sha256:fed2d3164a77ab859fe0a3625b9bda090c1b28181971881bac0abc70f8faf3c4 +size 1577420 diff --git 
a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png index 7cdae0eafda9fe680669ccef605ddf5f9df78448..0741d711afe2ebdad13ae5886b9a74b9996df2f1 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbb7479fde79ab89a078706db889dbc145f73c5d4dd0f499902eaa93baf5ad23 -size 2479595 +oid sha256:d04c9dc6b40cad5e2ea08482e298e30d47e524a4a55bdd9f822114a666405941 +size 1737326 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png index 2f2e8d11aeb87254d6c7706f7493cc4c8ce7f9f5..7ed24e22e1db4748e02d279ef67f27c369921a80 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15a02067ddda3362d1d04c361a4f6d5993d5f31d5532041a2ccc265f97ca21e8 -size 1015955 +oid sha256:80da03cd394200adc6421a13c7a1f3850555fa9d2b01478c511985daa9cd43cc +size 1224547 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png index 3e9b70d73c530cbb4486791aa782d3a165edf7cd..e1d7d50bd9a1c2fafde076b179e5aa6bb7c3c174 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc2e974d65bce9cc5ade65b3ade10d3e3f661d06318b29812e67265b68ce86ac -size 1330502 +oid sha256:5f654dfb64e18ec7bcc7a0ee3e0f2eb5b5b06eb55c5d1e916e324077e48eed32 +size 1692407 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png index 14c350eec09034b4bca9d0ba3a97b015e896d98e..30fd17e51afd2000bc62b8e544272afa3cc5dded 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45f67783067314ff3bd4df59d6217f9d248a3f501246dd3d1b2f0d62d9d40258 -size 1193009 +oid sha256:52fccffeba6872f5fd9b0b9e299f61f065990d067b096652a0f324518f7b50a9 +size 1637178 diff --git a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png index c11a4070ba8f1438c55cbd44ea420f971507ba0d..625ffa898f7f6a812539d0443699ecb0f04b4a5e 100644 --- a/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png +++ b/images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90c7e15048e2ebf4ad950d3abe6e1d967cee80474aff164ca486d2bea48e1866 -size 734270 +oid sha256:75178dde0475c0ae13f1eb7628e41ff80a6c6b293940e45bfd706963fea9ac00 +size 1135476 diff --git 
a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png index 609c311d3b2611ce31a42df997647efa0a7b409a..19f98145057d87b86811b211aa5e389b08cf3c5f 100644 --- a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png +++ b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7c2f5399d7f55c7a9632e7b893a53d8e770e160bc214e558b248ae4a8144c3b -size 1223539 +oid sha256:178b22d3249d5d4bb9e3ee3339ded012492f91927918edcfaeaf319a86a286ce +size 1429159 diff --git a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png index 91340ce32c70ffdbb582866b08dee149e21db6ab..0b53b21a2fe9973f9450f41c30831ee7f98216f7 100644 --- a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png +++ b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c9eb252f2bffb42f305353fddefd63a523461db37f98214e7aa38e0d5e526cf -size 2336780 +oid sha256:e5962bf938bbb02ee395cb900ff3799e464654d07ba14c19a02be43fc4a09faf +size 832907 diff --git a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png index 1aa76d54b8150e8b198c3c541f1e4a4c5417a76f..fe5dabb71d3e9e41bf61a51a6d1ce16fc30581d1 100644 --- a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png +++ b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cf68a03c054bc82e0f46b19334d853a41822de3fc5fbf42295ddcb7cf4883fa -size 586559 +oid sha256:1a21c6cd065f2ef61e8116f8bc43b0d09b9c399e474adabd1cde4a0c9d48e7ff +size 321607 diff --git a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png index c45812dbc4405dde868f97545feae1be92789e20..544a9ebed2570807ce83dc9b9dba5fb39bd5ae78 100644 --- a/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png +++ b/images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8066a391b804cea9e2d1c543ccdcb86d6b03ec5c23838844cbcd3223cbe0c7ee -size 483760 +oid sha256:e4dc79136c7da920cfc38724ca6df958a92a0af9180298f992b8f84e4884cb1f +size 347174 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png index 35485e41ad43734ca439bdf838bff22ffbb050b7..7ac38ed01775ce830a8d92e85975f73a541bbfd5 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e5af7285f2288bb39d8008b4b699d84de7e66dfa7fd18d8b5a13564f68a04d8 -size 2032202 +oid sha256:582b53df9cc53477cf17768ef00cf2d83f43980fdf56c437eb412b900644fe25 +size 844262 diff --git 
a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png index 2fb121507efdab74978fdf9e16079f956ffe48b1..7f4cce7d5125c24031e1444e0d8e15f6074f75c7 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb1622aabe1d4015de9ddfe1518a2e6db5509e6eeb0556c784dc9a65156a8f5b -size 6584167 +oid sha256:3aea8f812c2153b67d1f9d3ce580656ab7b0077823781697f33102fbdfba63df +size 1618965 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png index b56e72e26ab96584be04e4f13cfd0ebce685366c..d9b02e054ef06f719a70a488e953b71b7d6daead 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d51c407ace0076f5ba8891b67e4af224f04c836436dd8fafd23113f0a69a653 -size 1233276 +oid sha256:85e0b43c278ddf231eca623a1c80d073af22d8650d91ed267098397845bd92c6 +size 1386300 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png index 16a07e7786f628bb143f7f0ab926f850ca681759..e78b35e69c1963527d961bdc2dbee3ea6c1d13ad 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61a2b207d2404673fd4d7cd56e143197f638a1205025fb3e64071b7434ea91e8 -size 4118812 +oid sha256:60ca1922acd881b385a66baba2109ba7bd5955dff672b90bd92ff3be2438d45c +size 1479173 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png index 6c2c120720e4a5911238f41fe5de8197873a5c54..d58453bdaede972d4923b13784dec76e180f8566 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6816cf519dd78c4bc771c430f2c944f2f2a29db293a63b97d87035d2f8c42ed4 -size 6308268 +oid sha256:4ebcd26cb0ef0936033a9f572ebecd1cad9affe4702aafc10fe284fef9c947dd +size 1691608 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png index e5d48b8d390f7f018d7cc31da71e8a4ddc159e16..1c3cfa5c29aa23dad54c2e5d467e0260907c9d7a 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c553e6c7b81e174a2eba2af7d61272a114f5498920ed6da8d71d163569f4092 -size 6249335 +oid sha256:ba080cd5be63527f47726622b751ddc6bc23e1381471bfa2db112b350a1d5518 +size 1149668 diff --git 
a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png index 3409fadb60a2a325125582b4870e20c31c72e60c..42dae71ac9030ca3a912a33c59f741622415dbf9 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddf1bd27feec5197c41e77029f34bb03475755e0a27ea710d294a3a2d768646f -size 2166280 +oid sha256:aeb88e19633a515abcec8a82a46ac82e45498e6ae3d3e012f4ceb24705461382 +size 1981128 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png index 177a95ddd3d89eee3224a1c6b8b9094e6dc559a9..49b5b8d8343c83933463a05c2ec2222dc7684612 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:606ed79b33356e04cf7a208275042871c0ccc142808b5e800bd0e39117452e69 -size 2345653 +oid sha256:13bceb18856ae1c2d47e610056bccbe4c4db5a13e393813ff42f293f583b6c47 +size 1637291 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png index 57d8a6fa1e0750ae7c47629ed8bdc0bde4308efe..ef4e1b3c5facfe5c3ee8d114aafc1fac02cddda0 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:886bb9280b4c7f333e886229942aa783cfcfacad53646d464f55799c1c2e5663 -size 1710892 +oid sha256:2d7eb589cd17fc000da1843f3a9c2580e894146b338e012db342f2621ad72d49 +size 1825763 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png index c923617f7d84e0526add64c9c8fa71e124664f5d..7341695df53d43e3aa8a0901ca3e575680aadbcb 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a84ed892b9bd997c1186732f622d78844cc13a3d45e7c044421f857566648850 -size 2439134 +oid sha256:4c89b2104110f406c20d09b57858abd7a660c105543b12e1503733bcaf72b26b +size 1862041 diff --git a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png index 0e259d064efa2a40d5f1f58ed4c5c55cc878ef8d..9801e97cad9c85f86e2ec529ad7aa4cf9520f7d8 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78dc263ba6fb2dced4a103f0fe5943d2d3a30d3caa9d23a1618b52d86046fb59 -size 2308654 +oid sha256:00715427402120eadcf877b581dc770497399c991768b083d7185357772029da +size 1958759 diff --git 
a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png index 70c6537e4bb2edc81c45c3ef7462662cb1eb0d75..f6b4fbe60e1b56f6c7bbcfa488881250d8d61372 100644 --- a/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png +++ b/images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:447393d8efd82cf4466849221ca8513f1cda1f930e112240a695aa34b614b785 -size 6116887 +oid sha256:0fcb8ecd03399bb1aed1fad7af64431645177c5a3df04b4a25098170a296c771 +size 1812814 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png index b7465bdbfff2b745aa227f69e34fe09b97cbf44a..bbf63e86ca01b65cecc75fd10a0af4536f6f822d 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:402b79725ade855c4b11cc0ca7e49d2d09d63c5a4dfe9646909b79338ab24c31 -size 1046427 +oid sha256:454f2b1d3d64373ab5425c96d6971a9c440a7016d96076d5bc57f24a9fcf761a +size 1100069 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png index 9e40fb0491c9b0411ffa87538693489275a76ab0..6ea8d975ddbf836768ed1e949b276297de13ff5e 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e54d1083a88566043a8ee3b4662d55ec358b7f6a0a35ae6798a9605cd72deb7b -size 1876598 +oid sha256:855b427a4064d4a80045a186368e87c56a7171444c939a2a4970a936a26d6abc +size 1424599 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png index 5d223bf9f6a42f93bcf857bd5f2a627805c648ff..10ac5ac165a0c788ef6b60ad0217754b5aa43b20 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53949814cae19c26ee22dfae2b9379e81230a43c0bb77ffb1cafce77f1d67a5d -size 813689 +oid sha256:74f44239074f37f520a1a0724ae5af08b09e9672544dc561de8ff8335617aaf4 +size 1136494 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png index e656e84a39e7a7e30b9ee8e8604648c7ff2eb261..bd9a333a9e3ab6bbed3b21b150546204273a5e51 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e623e39ec9573b771ab86214e58c0edef2ada2ff023823aea811e7a280243e82 -size 863400 +oid sha256:ef363e494664961173a50f8fd2700c2f11a06aa47eedb81cb3256983de975cd7 +size 1537900 diff --git 
a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png index 7d3b7ba829b07d52565e7bcdf61d31e0ab8b54a6..8a5838bffb7d6360d6bb8274d037e381a1aefe14 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2219e0bdc2d1d78d6d161226760ddc71cf8438167a56f93e192b7f8e906cd59 -size 944617 +oid sha256:3daf50845e7a7dac11cb9ca1af7195da3d053d1bae981484fe36a571cee515a6 +size 896154 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png index 7b7dd0e52900d809541b028d6ca147085c42867f..f92876c73bae50beab2cd5d6baf4579ca5b0af3d 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a0c8a2154c1316aea5de7d288e0b0915b1dfc5edb48a2e6f4467821e1e91b7d -size 1022094 +oid sha256:bbeab8b793059b08faaa949785605850887c1957cb0365fda31010c47cb21ee9 +size 1001110 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png index 422e948da0c46277d8cc37fd9cde76a8ce5093df..7ea929517cb081b126decc02e984c7aa0e1c5a4f 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6389c9a2692e1a0ef6e36dab389d11f14bb9e8e43adc67680be76393157b72c -size 862807 +oid sha256:44b2b7d5e2296be1c9e448860cecc31b302d6c8f82849957b115dd7334826c35 +size 688200 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png index 757281061f496f4593c33dbcb2623478c4a968cb..4664eb74d231f187405686448798fa6d42588bc9 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:baab39943674b3a59ee9530a61bdf46fb1d0860d107831961ab54e558d88e536 -size 940085 +oid sha256:5a3d19fc9d4ae8da5ad502c1bd629019bc4ad51f4dedcdc9adf28eee9d0dcec1 +size 1165378 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png index cb17415ec12a812ed5e2a1f15d3f3cbccd4d24cf..7d54ac97a471eacd47ab5225661e698f7a9c3ca7 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eff9a28a77e39dfbad97cdd8c290f6b70c2a0faca06d9bb389d06213906ccb77 -size 787736 +oid sha256:2e8193e3e457913fabd3656d67d095d35738de9751dd99cc27da9d42a77acbdf +size 1043448 diff --git 
a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png index 2a7e154dcdd1fefb6af004a8a52170be46583ab6..816a1de94561c4ac7d2fac381d13747502e890b5 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f8b40026b324a33a5f21629c0e63468090bd0a48fd7f4ed2c6d4ebe894211e0 -size 1872622 +oid sha256:2dce741cd6c2a1ccc7822962cf5701ab2f731ce364b62652a6c3461c32f95800 +size 1441112 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png index d2fa85e2d9e2a167754bd36c9b63ddc069b97b87..1dfc432c449b15e6abde463f224befae11878e83 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:434e2870958e46c833cfe25422cffc4ffcb8f20589b26f61d80b44c94ae0e51e -size 1032890 +oid sha256:0e60cbefe38b53135f3870074d47daae9c22f2b0a11fc75963d43e41d991777e +size 1062170 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png index ce775fc92487f9fe179a5e64e4912f7557153da6..8ffc01e608364029c153358fda6ec594d53516ab 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3309549a895a5854fba1105445780248384d1fcf3cbbafd65f28f928a1052a3b -size 941599 +oid sha256:6847b14d041a271773613278fa51c15e145b1e346b596bde548f1b64867805a3 +size 1204310 diff --git a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png index a6f84a828f3e1d13b859f949e73afdb64bdda9d8..c1c26a6ede4cf9c31979c1cc930a1bc8ff651fe0 100644 --- a/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png +++ b/images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60f2a2793a5df95f36f7f0b22f97bd90eec2ca8af7e00082cb5adee1463970ad -size 1018170 +oid sha256:79acf9761705215f0be6f4ae3fbc795ed2f76aa5ef73150f026274735ee06dae +size 1129917 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png index 14857fe62636047ece601acd80ea085f67ef130a..a88529ab7adafe3d20c1cb7d08bf50e35ab593cd 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ba649d148106d1688c9f4108c1e3ce7341af32159b436d38d2cea83a09674cc -size 238869 +oid sha256:6c8b881490a1bead5f02d96ad922d311bcd899a3dd4af589929262782f04f6e8 +size 493641 diff --git 
a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png index b0087ce8d647b5a831c64658ee172120e8bcd920..d7c18b25e250aa73280bb56afe8fe13a1f032ddc 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9da0e496a6223bb960626897a9cd3e900707e20570e06deaffae0cadfc9a5e8f -size 1335228 +oid sha256:939babe604ac8361eb5bdb5d7996563288b6ffae87a0643f2f44fa3839f5642e +size 1330295 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png index abdb2a5fdd4da63fd4d0146317153132c767636c..82343ccaca7aa36595602f7abb50fd1eecf5d232 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a70c862ca89c35298cde4a2b951308961c5bad732d36dcd882321ac1e81c5fb -size 855273 +oid sha256:a2385cf0262c1fd4c38d8cfa7c622a09418dd9bc5cc460902bd42942dd381934 +size 390451 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png index 4937d35b38e4751caf838f58673734beadbb9f70..ff89bc2e1be3e4be90396e182e241c6cb21ed324 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4db1144168804f8c601de540c60b4a25f98a83ac4f4ff916a1c0e4a782f9b323 -size 1182379 +oid sha256:5af8da25649ca6674784abe5a8357705ed6ab3b831c7d17fa969fd30c1f0caa4 +size 1198240 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png index 6f7848e3805775f99e53dc7fc6cb6a4a1cc27a21..69a98b332ccd2859aaf41e029986cab9baa45a08 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4760b57f2857d78eb6dcc73ab3daa46423897ba4c957155150cf8f7c97fff94 -size 148463 +oid sha256:7e4f9759b7ed558a0aa992e55a1890f94a212d3abe0749d7c36afb05e182b8ad +size 285724 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png index d06ff9de9c8809fae8e1921b13c9c4287b922a1a..b85df271a551b7797593978ed5d9a53252c4dae1 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74b8d71f60c58f7dbbb21833051c02a1be1ade5269799bcab68f1ffa0802b607 -size 554837 +oid sha256:3c84489fc6d0a20b003f7dac7eb15f8ddfe923a3c8c75bc8244780b846e248cd +size 573042 diff --git 
a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png index 768765e2fcbda84a4a767c6743adbf288cca7fc1..6ce694a38cd616524b82e05e9e9b2670816d568a 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb2d2da8b896be84b4facec0d0e155baba26187c6b68f9cc70111ca15d408425 -size 1333267 +oid sha256:934eb9eb6d7545ec39ddc5f28b25aa6392c891db9ab8566b25d716a1adbf7908 +size 1264516 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png index 976955c706aa251ad85896d1e00b9b80c5a949b8..d156e75079b5f5bf3fa952e585b9a6c249b10072 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ee3b84b22583d204b3a29003970dbebff38f7a387096b7a2de06017da219b04 -size 481428 +oid sha256:1781263a70699b6d0b1c6b8595861c119de6cbbaffab84c050b237050285f984 +size 357434 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png index dc250273f209744c777d6f6fa05e5d34ce514122..0fe433fea35c4d124fc039f86d0bed8273ef0c41 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d396401d302342e985fff9165cc302fbb0c05580241e9ff5f2fc7faee228f5f -size 662066 +oid sha256:c3a87febe867c76f7a561c0eb0bd01ac1f9d8650777818f08e188b41c7a80834 +size 415482 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png index 554696e4266a8b36d67d7c560f09612ae457596a..2aa055add565c4f5e4b8d834e9fe2ec12d0130d7 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2530b0ce796973375bb6910bff8684571f9a6d105afd44c736a1df400ec6a9c1 -size 1435669 +oid sha256:3941a19b09d1e64e510954d5b217891397baa1f9c6e3fbe18463a308617b07d4 +size 906454 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png index 77f591c38b68f1cd90ce1eff9dc9ec1e593a9105..646e02e878e9d6fc8d9284ba3618ed48b16ba008 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c81548e62f082264c92c24b71b38ca4ce28c4513e1b4ec1fb44c6b38db6a990 -size 1231712 +oid sha256:c6af0a4a2ef8c62b3ad71cf0ddfae069f6784f29f14007358e77f40d41d79c18 +size 692770 diff --git 
a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png index b4d8fe192d9a38d5b05b221b96bb9936ca5e3d9a..38598d67b45dbbb46fc99f838d9b9060fcf7d408 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbabc32a56826282728c4508bcdf77e696fcb236c2fef4c13fe3d588245cb784 -size 610202 +oid sha256:5a1fa5cf8dbba6f3624185684265752281deb6474f11d9865ea436c459615ff0 +size 795363 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png index 04fe39b074e499d636ccfeb6d87608384c902316..ed4201ea5696eeb3081322cedcba46959cf84c83 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01709a17059e00c9c10da7644b5d6e0c25edb5bb5196b4da4210c21fd729a466 -size 663659 +oid sha256:04ce963a147ea4a648be04c7473563c2b98aafdd5bf95c1fd1dc3bad9e6ffff0 +size 720467 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png index 4f5487e4aed8d28b0ae5d3c853b4b28658307b70..c8f414c398a380b0b7fbd5695be5721a228dedda 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cba36c10683d5ca53d40a0c1c2ef47d53e6d5034987ce32f57985098c95129c7 -size 672302 +oid sha256:bd96d01f4830fe37410de34aface00333acf02f26ef00a95eb8e4e5d3cb2b140 +size 971066 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png index 56f8b0c68f088844a7887fdd7d6d0e0b7e9f770f..64a6ac742283672feee7a5183d6483b85dc688b1 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0dbadd89a6b0f1afcc4d563d9e8abb81969f1dadeb5d2b4d78d1570159a99722 -size 1491028 +oid sha256:ef19d774f1cb26a95bdca9c2a83929a2b6587b4c59700cd908953aea10486cb6 +size 1207127 diff --git a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png index 84d8566b749d7b31f2dba1239dd6b31dc642359d..b668316f78e74950fc90128c1b6446008e6b861d 100644 --- a/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png +++ b/images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a56c58390149796c78cbf73f500f774d45b44766f8c549158ef5a05b9ad77430 -size 840530 +oid sha256:b413c37113d7d03eb45d8ef48392f4afa5dfbaef7ee36798dbf0c5f55253c06a +size 735356 diff --git 
a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png index 5a20253f46885349a3ffd2f41b2847baececeafd..f34065a6227048918f1ca87c5397323935417bab 100644 --- a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png +++ b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:351ff8116563361811d7d1798522bb855e5d10815d824b12ef33cea9d1f3bcb8 -size 1150420 +oid sha256:3f5748ff4c9988e803191447eea3ebc29106048deb7a3fe161e52a398b238224 +size 1127221 diff --git a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png index 51dfa638a1c5bdc2fb3fbd03ddeb3fdc9129210c..c539fd298acc1d3d65a75830aa9f2c2ccf385697 100644 --- a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png +++ b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8b8f55cb74d844c6c5781cb8f705bd57db2a05c8f88db10defca27c5b5fe406 -size 1025019 +oid sha256:a5e2f2810e6d66d3dff86f2cb7ed384574076ff8d65640c8a4fe2f6e48cd7e94 +size 1235095 diff --git a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png index bb237363bb9e0ab3500c7bc5944ef4a84708cca8..873b4f609f97b91acf82af8d717fe9f6bb2c31de 100644 --- a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png +++ b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2dae171ff23b8c9c8ca2a964243060c76cdb08ec546a5761110757377d0716a7 -size 1150268 +oid sha256:33b7785b119baf3b43f1f44c0a031dbb0968cbcfaf444bbc866ddbf3b5093a87 +size 982959 diff --git a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png index d9902c6b7efbd4acc306fec9a131e01c1a831282..a17be467434dced84f32c3913b309433597ce69f 100644 --- a/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png +++ b/images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adff7a959e247bed03d3d5fe34a069d6f9337d90c23a6080dbfa2f6ea1019965 -size 1546283 +oid sha256:a9b954d92e9be79ab82fa1f7084a7ba94f315d14dbd5cd12367f988dd0d453cd +size 1648663 diff --git a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png index 24f8a5f6de955de40133038c6537191dc45bd6b5..2ec2673c66a89518a612fcabee285aa8cfca97e4 100644 --- a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png +++ b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31994517882720a99aa3858f5e5aaaf9f46092590c19db0d719a40d78d73cce6 -size 1206606 +oid sha256:cbde686a98bbcbf250b8bae648b47a31b72dcfe0a4f314fbf74b3e475d609acf +size 1339415 diff --git 
a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png index 17cdd261fb84c2b5cc55b17d3c565a4db33a35a5..f8cd0920b51976439bf8efd0027131df9a80873a 100644 --- a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png +++ b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b35a7641fdfb3147c1dd2e4c935bdf2c321e8ab1562cf43b17968430ccfe4c -size 760115 +oid sha256:1e5975d2d4f1d861b7ee095b20f46d3d90a354d97a7ed8a08a57cdaa264e84e6 +size 726724 diff --git a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png index 02c70f0b9c47e190040aa8730a46d4c07e5b1822..01eeabddfc9a5ff55124d54a7d0559e34f723cd4 100644 --- a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png +++ b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd7d727788767192f60238f3c0f7e799b91aab715e386648bfecd3b9c2532c98 -size 1926828 +oid sha256:6fb02df6502af8bbcd0d8a8947cd190036aa85180ed798c565227328a162f88c +size 2041403 diff --git a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png index c1cb55a6df61eca6944bfd992cac691c25757d78..58ee6bac4acff56d084ebe1801a97efa87148c65 100644 --- a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png +++ b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50f94579d811c2498e16f16b631bbdae6138b9374f3c1503f9d41e0f61945702 -size 1205150 +oid sha256:02ef4f329a804017d4af965894c0c0b9ccfe641f565406e3b6637a2eeaef44ba +size 1413525 diff --git a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png index 198af74ff2d4c88aec78ca4ac12602f64950773a..d039e827eb5908673a4e68eefa80d7336c97c8cb 100644 --- a/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png +++ b/images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8d9ad02ab29d1a19db5965852cbc96921d47f726e8b8c898958e89293319a08 -size 1147722 +oid sha256:dc35c837ff87656df519b6442ed7a2ea62bcfece4e7a4620311d68b721bea976 +size 997781 diff --git a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png index ddbef8f81fdb439a0e6cb8b25ec2794f68bef120..04ce4c4a2794fd4881e3cae75c894da25a307a91 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58e11b9572bf093564086c8149d8a067a14c39ca11d19a2ef4467bb4748c8542 -size 1638862 +oid sha256:0d0b02118609aa483c523d6af4acc1eda9ccf9ad06596fbda8fe57319cd6ed05 +size 2082914 diff --git 
a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png index 1e11568f2d77fe98a397d295bdc8f2cb06be63d9..62b54a192209a45d831d8cfe9a8692db25b8b504 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9478cb6f2dceb9a80fed4ab147579fa045f7c93460f5ba431d298405d5218568 -size 1523115 +oid sha256:da455a52b9e0126556ad626285c6b556f8bb618ead296cc7b0181863452c1dbe +size 2295077 diff --git a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png index b23b360b851e61d41caa20a3f908965a23194a96..651abe91e3bc2dc52be461bf148eaf9b78150f6e 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:187297f8dbb09ca13accb41c8cbfc3d7385aa67c76f31521d683dc51dd62e647 -size 1103131 +oid sha256:8cee8eae0fcbebb10d84823db2ae81c76e9f71e775a9ed61892a3f0edbc3cf1a +size 1202160 diff --git a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png index bc544fb408a5f79714e19707b097637d0a591bc8..dfa09b78f712fea61a3f95354bca39f5f1c09bd4 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1fb6d26b5411acd8c76efab7604c5a03f5f8eff5dd0b62725a88e05f5a00d8fe -size 1521780 +oid sha256:42375c8cf422ce12206609c9855e422286dba2fef599295d15eff9a22b5b8869 +size 1707856 diff --git a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png index 96732e818f72edd106b52c658ae9d1f7d1e6e5b3..19253886c4dfc0e40e914f3b3f171f4b1b9d3fc8 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:682593efb63078a705020dc1e3043485136d77d5185344c0b7f574c80dd5170c -size 1094091 +oid sha256:6efa7b1fecd7aee6b4bd7e23a06b6289c4320d1f07be9c7edd4220b53857d35b +size 1443409 diff --git a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png index 95ebf15e904aeb10ccf5ec8cc35e01e81f786321..5f06ad8ac983cc213beff925628409ad2265e754 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b40fc6f4314c41510a9f8c48a6678ed45f356f20842478e83b5599695b75eb2 -size 1114987 +oid sha256:503ffca5c17c712c6669bfb3a8fa94f4a3eb91bcb6f7fe2a02ac69e468541781 +size 1750021 diff --git 
a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png index aafc65cd635cff4800d1ed38574f92f80c820975..8f3a6a064b7e7564ef22c780faf3c9f6ff0f61a7 100644 --- a/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png +++ b/images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17a5ff73f950883076c8cebb232811364dcbab562cb23b5e0545bea325a8da3b -size 1119156 +oid sha256:3c96b383d41c764495d74afd7ba5124511ed1e59fe03e69794f6ac3a94f5f328 +size 1855644 diff --git a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png index f7e360a00d10abe19cd59e53c26f04ae870c62b4..64cbfd79ee421672e294b749440cb2243c040c7b 100644 --- a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png +++ b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce664552d948bd69bd4f392f29a0539fd976c262a16f375b67c15e577ee65abd -size 950534 +oid sha256:e6ad44a9f10cd627f1645eac38f2e1b91d5a489eae31c7258f71903baddafc83 +size 1555992 diff --git a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png index 4a16d8847808934b0bc525baf81d6aa4845f9072..b1c34b057ed5832599c480fc239324ba6cd2373d 100644 --- a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png +++ b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b31dbb6f24e6d4137c4a8e18fe9df17333b625adcd36113ad09a9fea0aa40a5d -size 1374159 +oid sha256:b7093cdb225c2d5c31d5c8b8445fa855a9bffa34ec1690bc155bc5c7e483dbf2 +size 848745 diff --git a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png index b45b0250c113675de2e857b97fabb2ec554564e5..a71d77dc2559b782e6e3fe586ef0952d9ea1538e 100644 --- a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png +++ b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6206e62d04ca1ff0626ac552edfacb370e46b45f0331f76f0923d2c7aec5775 -size 1148043 +oid sha256:971ac73e80841cb3629bc20bb87bac930388c97aece970235e96ab4f4593e3a0 +size 1376240 diff --git a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png index 96a2ad6688d0e954c0566ea6f9a2d061d728f8ba..175157ff635036373df65dd6ef75022deaa1067c 100644 --- a/images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png +++ b/images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee4e943a3f546033fc6da6d24673d41f2ed2d76d1fe4533a336772261bf4020d -size 948542 +oid sha256:8323332e53161f6d44f7065e8862c9c61c0844ea7b4eb3f98cd1db7e6e2a863f +size 1369121 diff --git 
a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png index d0d2e388ac868ee3d69a9953106bef369ae41148..e0e0ccfbdd352bd197387c4a6be25cd9acd0b75d 100644 --- a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png +++ b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f094d22184f763f0f571d4b694bf15c68a4223fcc479be5234e879affce7d5a0 -size 210324 +oid sha256:4e866abf38e42edc35286f446b329a4a7cef568352aa3f3d28803f5bd8b6e04a +size 146445 diff --git a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png index b4da40357914997b8adfd1c1d5c8dd74be770031..886a165d54d70b5d7041987d0c4a5085f34f2c87 100644 --- a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png +++ b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acdc3548e26cbee307a75a8fae36073d0c78266d84177ae745e12c7dffd0b040 -size 223715 +oid sha256:e42bcab4cb08f80b3b0e4e46e4f2fa142ef4a5ec01423c6e9f0ed87efd415a53 +size 164152 diff --git a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png index 4aead832c45e669e9d24cf2000eaa60f4cc66bc2..2faca20bc0f8cbcaa41eb48d866701cd2d46ebbb 100644 --- a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png +++ b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8eeea715a3822c0916ba8f0ed47e9acf9d439e0398a29df16fd29fdf0ad7922 -size 1054244 +oid sha256:f04887ca304e67fd7d72e7690eba6ca281c576b983ee781bf82d2cf4fb887c07 +size 999539 diff --git a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png index 760d83e191db0e7bec8a4eef8e62f00f37f119fc..3ef395bfddbdf5e05299780565454273a1ca7129 100644 --- a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png +++ b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dcbe56566d61f490db089ebede4427da52d46bca8a84a803d2901de840e2438 -size 221387 +oid sha256:df2123aabee088e940c15c4e95b2915461d47fe644a4925e5ed295ca1aca2776 +size 153192 diff --git a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png index c8937c8fee99ddb425197ba3ed0d96f921eee836..b81cd14f206af901b33b63b6efc2f22c193b2536 100644 --- a/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png +++ b/images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc81372d02207b21658502b9594d17c2a17fbaa95906a122a6248489a8286cce -size 230713 +oid sha256:332d003a6760a9c75abdf28736e6d2a6abc3ae85deb823736020b3b3d2ad3bff +size 234908 diff --git 
a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png index 4ff69ab3152da686f07ff1eb66a3846733182d19..2d23318360f768210226bac63fa73b1cf122b089 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:169b09269f57cfe04c564a71c30ce23149751c267e83cc6684c3ac9d80d49856 -size 1608660 +oid sha256:6c79faa4be2dd1ff6968cf61c770d7b27dc0a3737cf0f0c112d54e55aed935c1 +size 1545008 diff --git a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png index cef87babd8fb617bfe7294b2e3bd027e085780ea..04f2e62a50640ac27b3f0af0d67556fbcf75566c 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b74d74d7038ae8bff018a618f16a661cf7b85993cfd9fb0962e12ecaecfb79c -size 1696472 +oid sha256:b21899dd2fa6bb0cd39764e38747a4c29e0bb2d0489b2347caea1a593c219895 +size 1256475 diff --git a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png index 3f32722f5bcc547ddd9637d40ef18919e32224b6..1faa1b1602c194cf4895f8e2179c78d662871fbd 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dae551985dcef9987124b13c70d83fb339d27978627d0fdee922fee1ca305c3e -size 1838304 +oid sha256:8303187b2e6f6c62637bdfc2cacef11541f3c987131fe76003f69a9ce86334e7 +size 1706391 diff --git a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png index b46c5759cc4453e61d2b19f96b2ace2b910e230a..72159a8fef916c6559b88ce28116e28eff4403f8 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14c1c1da752a608506609ab3790559b664fefbf4ed883355673938671527a4e5 -size 1840540 +oid sha256:1547c0d32f197caea14b681fd9d77ce1a7ef1062a44fa8eadb83c450e0363017 +size 1058784 diff --git a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png index 5a110d919d62ecc9cbb5fa527e68f50832b5a04e..c2b635d1214356b87245d435da43b719a8af105b 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eda85b3237e816b2e475d15666731102d340abb6e3d548216225aaa6f2a5cf97 -size 2011674 +oid sha256:a69d73f6e70f6133da23a4f54ad1958979dda5d1c7215cae4cf55ca19274c500 +size 1992449 diff --git 
a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png index c78845f17a9c2ed2655afb35f5586ce7d7b6c4e2..08783a3ad117219727bd93a8062ce4f8c4094d24 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7b59bd13e7c6d59c6a11f88cb200a9a98edf544478fb59545ea045196990b46 -size 1836373 +oid sha256:bcb918393c1720175cdb38bdcf378301027ed7ea01c5dd679bcf86abde7ec009 +size 1948373 diff --git a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png index c0c2a471f94c7e8fdf0eaf7c1f089a393e5fd57a..6a0904ac99ed6e15afcb343c0e6ff90e0f9c23b4 100644 --- a/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png +++ b/images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a07c2cbdb7e32c60d921a82432f53af65d68174c26b7b95ebf6509c9c06f65e -size 2154545 +oid sha256:76344e09ef793f605e4c03e3a38037013c6ce8c290fd26d4ccac39c956357008 +size 2550173 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png index 6fdf7373b22fedd65540cc452e0821e14d3e7fac..538de805a42b15ea2a271ae469ff59e3c6b45ffe 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e1f828c590a78defc7540f709824c2a7b68e55bd7095bcdc621a6510b5d4bef -size 1244453 +oid sha256:abf510a61b1bfee8fcb42627489e61b24f6d5ede90d6f0ba755380a528679657 +size 612783 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png index 6a31f195b583d1215c9485059f2ab95bc6cf3e66..1349f9c364aa140228693d5d1f5684cd3118e1c5 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8f895becb7b6d2cb8c850a7134e16208c1020d8179f294bdc53cba4e9670f9b -size 673555 +oid sha256:d74c364451fc09e06b268e6f73fc1f4080cad35dbd2c61b53c0f030f8cec05f4 +size 893297 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png index 570a2478a1193283a2210f46bf3b683aa7a90e5d..5c40cd61ed32af62ca6d6d45827a20cb9ec13b37 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f4d5309725fb023d541cbe1499d1c4f4c0dab8388552dc37e20c1e0217b3cad -size 603455 +oid sha256:d4768759de8586c81dcdbe231277b7a5626a2c01b854035a0c3a3c46bd20d9f8 +size 1113467 diff --git 
a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png index b1c2522782d0a999dad8d350fe72bafe32d782a2..65163856b98e3cea57692a34b8e316f53fbe01c3 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4363b8fcc22c207f6be679ba5dae27fccbb7b525be786dfe7b92a76865dc8c1b -size 555494 +oid sha256:8dec59611aa4d37278daa73284cb1d4143a634c762cc89221ee9432d6d782cc3 +size 1075068 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png index 9b21725737a1200000e5824141e98f77ca85cb7f..8f4cc4c16a25d12a1e0db5859638cdddef0624ff 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9151ccb2952a36f38854d1dd0756489f3036cd22bcb8340fef3f40cc2da0027 -size 673409 +oid sha256:f63c47ae781921c799e142ce854567ca1376b06b2480eb593d59d9f38e5bb65c +size 1036763 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png index a42b6f616905bae24fe89852bf40a6bfb8f4813f..d46fa8121b465142b9749ecf2c88c93d848233b8 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9d17c217cf9138e240e6a5bc23a67b26db71956d522c676c3a575506d4a9e25 -size 691988 +oid sha256:fea57af335f8c67241b8b267d9459ad130251d8d1254251a7aff0ea0a056d80a +size 1136978 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png index 581390fa7913f45528c980d76e9f9afc9e8bfe3c..138750cd6687daf56ade56521d07850dc3e60e2f 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4180931751f5942725fdf829a38de9e77488870a6c15a35d4551b4cbb48e25f3 -size 1002741 +oid sha256:339235362a1ac5ffdd71caf61d21830fd8bcc199591bfd5668a6ee151a49c897 +size 1148242 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png index 6888c66d6dc79ccfc594c30ff96ea91d0441a851..53e9db58175f8d521f07dcd473a0bfc1559b97cb 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1aaf24338cf1dc281558e38d95f7d66dfe68230c4b6d36ee54829b63df230fff -size 310135 +oid sha256:7a07d542f02902929822900398cf988b0271f125edeec5a3ce2e8488070ef2a6 +size 310073 diff --git 
a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png index b80306248bce9494c4b980b691b76d45b904a734..6f2e418a9975443ffddefa95ee80eaeef02cb527 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d661cdee1688fee7de5b16ee5d060feb9d0a265614f3d62cd2f1a6549303852 -size 939236 +oid sha256:791b3b4578ae48acd4247836775cc47916655ec6e20b3d702e82c19309498a60 +size 667957 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png index c3bbaae2b54f08af5c3b1f2a2c1307e5487c845d..d48d97176a09cde33e957e714f117d2ddd34a593 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0953459af1c4c2c9cd8d2219c7669131e40644614b10d96f91a35806aeaa966e -size 946531 +oid sha256:ff6a5ea7213d759c45080d428c0e3162ef60651a1dc85ab2c213ea9314371550 +size 728687 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png index 72d0c87af4141e2526378ac9f929140a67b2ad75..f4705b3b4e474af409299dfd4052169dbad6b9e7 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22eadd0916dae8306014d8f870c8a66fc90ef980a1d65b204b5c70b25dfffb79 -size 823682 +oid sha256:ddc81aedbd1f3ebcf92851ee9ad322b8c3969138b4a7a0161bb64d4b5f5ff855 +size 1090611 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png index be5c91466201876d32b3e99069e26e460cf45041..12022c4343f31e42a4db8b52779fd921aa8c47c3 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd841a361d82dcc626452d2ced5e3f087e8f143fbc5f076cfa091a18087218e7 -size 931076 +oid sha256:2eaa2229502126c2b5d1785f061b45c20fc50cffc06d7b314f87235853c37cb6 +size 926891 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png index 2dd9c8fd9d25205a5b9cb7a39cb289034985f296..d5b975992c5dc691157394d717f32fd1ef4cd976 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d35ec633bf54ca0c83ec6c8b854973a1528f17c2791ebb12bd342197f8f2d9b2 -size 893094 +oid sha256:ad928b91e1780e37565f3c1836d82590be68a4b7f41cfbba92979949b5bdd5ce +size 418535 diff --git 
a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png index bbf14fbcdc33eceb5951e7e7b017a5d3909e0e38..fb4c4b6407b8f2c80ec66d7d0da9ac2322364fce 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4124a6c148d7a242d761dca18e08183993dc4ec3d4d0a4939ae37f244cea0da9 -size 321842 +oid sha256:85521f88265507c1c958cea2a491e9dfabcc21f0864b7e3f240dcdecd73d5028 +size 324088 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png index 126a6c638282505c7d474e5e613e5449fe36d6fd..6036f9888262b52e98e50fb22c854815f94c0920 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9949ded8e1df1d650ec864a7858cba34d4004e6ae0409dc84483d60037ec4ff2 -size 1004257 +oid sha256:69082740ee9277ce5f22166e293ff4aa8765fb0d638df9c72f007dc72e78a70d +size 643680 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png index b55c800fb3d38609ec4c353534d8fdfa5b86edee..1b4c680f9d76741eebf2b950a3939358016e3760 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d3de658db4a0841bb4d01e37831527216325bc25972a9ba4313e9399aedb885 -size 957432 +oid sha256:2f5f9307baa8a1e3251ad6a12061cc296530b9c6cc022e38c616efaf74c44746 +size 1079093 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png index dda99a360d9541c966bee8e0ca39031d973a3497..ededee8599addd3167eb3898934335daf6c504c2 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ed968a29b3877faac867c61790247dfd98da829dd42693913211f4f83e65351 -size 878126 +oid sha256:7a38f54bb6cbccb4d05ab8b9603d4a9ee5f7589718ec288886c823e031a59d7d +size 564623 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png index cb45b3313808d710caeacb518c14051584ee02c3..230c03093e3d8d0f360a1c860dadab38385fc160 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e071a9a7c8d881391b3c3d1de1fa2119c51102b5f1a0f3f8b29d99bd9bbbf40b -size 764223 +oid sha256:d18a08d0f57ce1b9b5884ba012a5234ce804d50b7f7ffe4efcf19d3f0c9ce7b7 +size 332123 diff --git 
a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png index 3052d7ea9fa769bf511e5e42be661d790f19216d..3751c069f9acfa4a83c552658422b7dfd6399d0d 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e45476d9214b76444a352c0016db8cb8555a6a4e368b96be7a857c901ea3c7ca -size 940775 +oid sha256:1609246b75f912d03ec3f9c0c7cc1c908887f1c4a8d1729651238aa97411f98c +size 1060203 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png index b0a71d516723dc68f814e48e588ed5a4d17db860..4128eb6ea408692163dd5b3b2bf73c49f2d5f4e8 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd6854923d859191c61f978dd7796d6f49886decc1650a0622dfb05649e01a77 -size 936381 +oid sha256:abe632a9302435252e8991b96df7a606e585f6accf0b71f4ffd18184ab007aa5 +size 820086 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png index c57ce9c091883a39419e3b77c9a3a6b8bd44acfc..e89e4841e619dbf1ed2a8d56a8405f233e054cda 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f579bee069812416121bb56010fb1579183f4c2d2eec99b280948f90f0b962b3 -size 764217 +oid sha256:cc9026a2b96147b7e4df8a9627b5238f29aa4f8a6e2d4521b3f7f67b09737c74 +size 1000187 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png index d8156ce6d331010d15d6dd0a9fccaab484d37331..6bd33a74b976cf99c2af04514fef5ec74626ef42 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ded88e5013166c8356ababffe54d4b416e799de1a0a2ff2bb7934071a10efce -size 950523 +oid sha256:194663914691acc8976fcf322e5d38fd34a5077136cf9937d1cdbb018eaf2125 +size 1076134 diff --git a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png index 6e3c7e38b26c64c4d188e3e6822473bca29b0417..7b0eea46caff6b91c4e78b2a06a3333550f6b21d 100644 --- a/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png +++ b/images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90c5d4f4abac08b9287edff42d7bf489e415b68ee28032c9d7eb754f0f3c984e -size 733103 +oid sha256:67d960c8ae45c26d3abe76d4a4ed99c1021c263e48c88f95edd1b4892230ad64 +size 1053177 diff --git 
a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png index 4c134e155ddc7ee6b50f0fc0d4694a488f2d18c7..b1e7c4d01fde9d89170da8667b6be9fd586bf1f0 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d20e3654d31c7ff488ad886c3d32b8ca8b64517e44515030b3b107bbb656f6f -size 760373 +oid sha256:8e263d58fb8c6a3a46ec8568bad3189f55609af03fc29df9834183b5be23701d +size 344925 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png index e4a7f023965e001405336c8b79477eb285bc79ed..319517c4b48f67cfa1e330998be4368ee620319f 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2091943d3265a6c6c826f0115220e7a38cf29775f8f14fbf76a4f7843ca22ed -size 838574 +oid sha256:c56fbb9249189c0aa9767ba526fe6f7655c5638cdb8532c11060d5cdb4e544a3 +size 754348 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png index b3c14d55c230baf0e05e9110d8dcac9c92720551..7d5693f7dc87a54967df4a9c5d5d18b9e2bd7de1 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e5c0768a8dce51a435c28d750eae0b953125785064050cf2cdc937ed6760509 -size 768637 +oid sha256:cdf963d89c3d41b850f6fe00b5a22d1098cf305d28ac1e62b0170c7581e3547a +size 835199 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png index 7c04153e56e2cfc1076010a7b455bb7b7b03ed3c..f04c0d1ae5dd98717166c8c8512929cbf06629c3 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93ebcfcb777fa95a13577b860bbc5dbb745eb77b7a4c3a2d9828e581904fa22d -size 829567 +oid sha256:aa8f685d0a29cfc6a214dc7463cddcbabd8e5d2e391f262212fd6737baf43054 +size 925338 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png index 4cdb70f930c0564cfc40e9830d94136471652655..760649b8b36575960236822acce16a1504de1f2e 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:196f4cb5175b5acc326f5209acbc6a6370fd06e4e0681ffff17cb7a7eec6bb6c -size 760971 +oid sha256:9714c0cbaa83b76e0b7cbfa6eb3a1cb2622cc44143510850b08aad6bc9a3efa1 +size 837311 diff --git 
a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png index 1ebf455d52ab208fbb814e575d11360f4c10158a..d421081390bed48513a3d8bc7a7a15d1dc752d8d 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a48453e7f8806cbd38849a01738b3b69220cdb930f9b49dc99c4246d032dafc9 -size 782368 +oid sha256:4626193f156d68ec7a39c852cd9875606683646d9a30b0285cde7c14a7799309 +size 731254 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png index 8f29d272cdedda8044952d390b985fa3b185efe2..7dc34040f45c0fee539844ac4e829927c291bca2 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fad271096e73fb58a762f1da927ed59f8a02166afca870d412784a16072806d6 -size 810759 +oid sha256:98a08b35cbdca7005867e3f5197a146bec7b6e068fdddc7cbd7f6942f4183537 +size 712432 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png index 3237425a3597db081231d30c342738f0dacdabca..0b7f9209b27c10e6a9bbe147c8ff1bb847f36fba 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5f9dc36792061265d1ec4c828f197d8c57264a0f2743305f2a1df508f574903 -size 772689 +oid sha256:3e2e3e6a5d601caf132285492c691dcab3213fccff86128d7a521f4893f3bc83 +size 554755 diff --git a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png index 21fb393a7c66a67d87abfa8358f8ac4e5bdff036..b3e83aed5768d2b10ffc76d0cb139b3e179c3690 100644 --- a/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png +++ b/images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:450a8802032acbdb91282d4e702f5c7a4e5d94ff1f241897f7b288d4ce0ef0fe -size 792544 +oid sha256:4f6c02624c699f1d89d370316d230f89e143998156e2559aabee016142afdad3 +size 861109 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png index d9e072432fafb20e778f239878a30f065c47d6db..3d8953e2b6d37c5a544b155c90e678170cc19fff 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01debd27b96973b5b4c3e40f6a56b75a3bdac5a8c315e504d24c242c232dd131 -size 1555220 +oid sha256:36d10f9232ae25e04a22383b4a14d9d56c81404aada9231f65f87397f1764df4 +size 1920367 diff --git 
a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png index a7e852cc74e3a50d9b13687959747d4a11b5e355..0506bedbd9d862a73e8d8830462f60d00e2955ce 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8e64512c316172414bd186433638f8b96968a441041be43b543cb97124cfb73 -size 672971 +oid sha256:771a4325f703c0ed653aa860aba088793d2b0fc8c2a8a84c33f56cbcef639e6f +size 1021864 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png index ae4d51f815f53a1e99ec08bab2efb6d694796089..1e03b194de0d4e2f8276497e0a9a4c2b8aaac849 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fba29994d6a17d5134377b452e68ff41e977dc4a64b369f2aa05a193afb5ed73 -size 732503 +oid sha256:7363bb190c2c3e9a8ec5b5a84f653912df89f4a994c5f30d2dcd65641aa92801 +size 860957 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png index cf45a8336ec195e6f8b75059d433041b45d6edf8..64be6845ccb5a1225cdd710e24b951ad70dc7bf1 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5213322afe082722faaeb679632c132178d2e9fe1d0e617afb5d37fd320d0e3b -size 1224840 +oid sha256:291c7e9458aec57df589ebc7aab4b49dde49e813b48539b4e4e99f3acb1c5b3c +size 2061039 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png index 62e6ce5d782ceda20d3629f6c5a65190ae41118e..c12375fc83fa7791ae661725875333468e880a9c 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05f455f987f61f73133ca47391dd7a8fc635741ea51e9557a004dee9d65da357 -size 727325 +oid sha256:c503f81224cd0203c38eea61e18ac25ae07ecb20a1d82fb7a792f38b98268eaf +size 727180 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png index b860caf9182464f6c40ec85cf86b0a578624182f..a9226cf898b00816dfde49c73c66f7177622f161 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10571565452f5e493e94b73f05697b99612eb7effbd6b7ea3e0c4e8a43c25195 -size 952627 +oid sha256:423dd60e196bc8dea7f9545139e4450861f9c6883d456519bedc0193a8424dc6 +size 1623516 diff --git 
a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png index ddaef94464d01627664deecede7fed5a6740ead6..35f58cb592da836c38ec530d4edb0a4ec4634177 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9822803284ce7608573319d00a204106824f9c4a90fb36f0871794184c34033 -size 1227414 +oid sha256:d4b07b919046175f0118330c68301a4e665cd5a7c334a155f0433b2f71381e69 +size 1375144 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png index c91fd821824e7921ae1ebff120d81a8f7693ce8e..48efc8c1f9b3efde71537deb1d7ba76d9692ae7a 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec506df1a2cda708651be4e03ec015a3bdf17f4f9b03c9ed988fa262c7375ac7 -size 1067295 +oid sha256:63edd2f72b344977681d8c3d50c76d31e62ccba652135ab0eb55a174a1411805 +size 1084434 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png index ae6e71aafad2d84d3ec1f96dfa19eca3ffa9e0b9..dcaff53bc1a372fc0d38b44b7972c1c3b8609194 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c8b68878ab03adf2a8d5eec31cfc25a781fcd99bd4bc47c99b9df967ebb9260 -size 2390001 +oid sha256:1d6fce963f6c353f77de989d72b4cfdb3b488d0aedb889ca4687995ce1797b7d +size 883226 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png index 3de78dc13654ec9a0383b43cabb59fa7e80fdd9d..155b4c69014df9de2476ac2f3941fe9c995ce24e 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3575f9da971c812c9f64c7aac7d4b3c7d5b098f0aafb73a24c7c55a03b908fab -size 728786 +oid sha256:5c5e63f06568f427a4cf29c5f535f66105c5e88dbabb4f53f1fed718452ddd96 +size 958315 diff --git a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png index 508b1ff2a07218648241760d2d7b227d8926ddf3..55eb507e1a351a500910a21fd40b781daa354d42 100644 --- a/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png +++ b/images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dca09430c8bf9569ae6c4ae3f06515f2f724c5358c373088d5eff37353fec60b -size 733858 +oid sha256:88c2dde7c1ac799264c6490a6a2a510728ba02114689a77b3cbf888cf7cf8cea +size 969795 diff --git 
a/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png b/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png index e318cdd703ac16a1ac4f16a323231631126cf422..75dfa875de14b2ebfe3182decb3a313789ba0746 100644 --- a/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png +++ b/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d172017aa8657d61f8cae198cc4e8dd2285d48ccd3530da7457769f046b42b83 -size 1257836 +oid sha256:eed1ac3c8f9ee87cca75b22b03972a1d681c974c70566a890d0c81733750fa99 +size 880845 diff --git a/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png b/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png index aa895e147e906b35dcb501bea38816fae06f371e..fad74aab7e06ebc8059533d56cc88649da62dc6d 100644 --- a/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png +++ b/images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5114d1bc69b18be232cfbda672813e151bd82d2fad0bfa9add08df40fc640452 -size 913293 +oid sha256:249df3c0340a1970396e0d6dfeb74319073716fd77c3d54cc85329ee6b42cb2f +size 575806 diff --git a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png index 904d759479a4c77189c90178eed391f511747340..6d682a3cd39e51e08160bded0e44109d4f9d360b 100644 --- a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png +++ b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9992ea66d957e863160f5cd4b20fb2d680cf6a17d5e631c54fa35fa25c127dab -size 571834 +oid sha256:75dc4d6fdf76c43ea3bc3b1cd87a7c83282e16d4ef379014e2e81acf1b650414 +size 902714 diff --git a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png index e5b241978f265dc97150ff738c6d826f1cd08c42..c46f9eab806bc0d54ce46dd26a3f558dbdf76ec5 100644 --- a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png +++ b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13cb4e17b7d5f4a4318dcc125e4d1b37233a8b341c24b998e3f32db942a44035 -size 1203365 +oid sha256:62443d8161879bbe9a063d406c7d946238ae34912f18f382d6529e1a726109de +size 1648820 diff --git a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png index 97925a5f3823d0d9e961927d89561aee28f0d2f2..55a12fc8a371e2f30cb56a8c8774de275fd23ffd 100644 --- a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png +++ b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eaa64e4840fb817fc0a1ba564af2d79ec274bd9b7727890f4f1cedabfd60ba87 -size 472246 +oid sha256:b3b9c805ca3991cd242f5db00ff10cfb1539825adfca0b1a9ae1a926e067e97d +size 666208 diff --git 
a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png index f6ac9a18e3783d83a4475eb818be33c0720cb2ff..dc28cb9fe1651a8a77e6d7a6261ccdb99bcd3a87 100644 --- a/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png +++ b/images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c8c1f3ba7be808253ce20b6b213671b6250e8d8af2b2978aa5330b62abcff0c -size 1189669 +oid sha256:773c6cf9db7c5d6015c8e75b3ecca0e3bd9b571a0f6d4dc79e4104d9bcb8043c +size 1607084 diff --git a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png index fe105495dbc78d4bad970205e5d9bdaaa2aa12da..513ef198fc52f5a24dd5a1cb631201ffdb4d55c7 100644 --- a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png +++ b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85e6ebc6313c545fea2fc937603e537d886ec74c7ae0a8715b3cd725d555a4ed -size 1215843 +oid sha256:aa2c32ccb96328656f6c5143fe431eb64d1178fc81440619c691841938385ae3 +size 1287257 diff --git a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png index 6a28ba382d82a04c042b8c66d2f077486c0b1c80..2bb943768c024a730384aa15687521f4aa2ae54e 100644 --- a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png +++ b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2cb68f007fe2e25f145c688a75546066e6d425f672e2163f12408e3a30ec7009 -size 826753 +oid sha256:e7685b048fb16d2d777b70cb79175ef35e0b60bf78c83c501674fc04fce90e09 +size 1031398 diff --git a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png index 2907f04974f0b9c3a485c94718b7e1a7fcdefde1..9609cad1bb7e4a2c65834027a3717d63ef3284b6 100644 --- a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png +++ b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40be31fdac0d45212bbf755ede74c4f3db0d3c2ddbd23ce84597fbea7dc8909e -size 500068 +oid sha256:db70031e868bf7247962d90df4ea0fb0385bde0f741ea0ed4b580f61b3cd83f3 +size 392554 diff --git a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png index 9fa4f6f712f7dd8344979b18e90692f416f7a8fd..6a9f1602c9ffcfbc09e5a40d2b06f0213d606e00 100644 --- a/images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png +++ b/images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0120e9049d999280a2e51db2abf3ad2c9cb909665438023ea4e80886e7536a0b -size 1204873 +oid sha256:592cd2fafafc26e76e73f54b5e9a4d943d9967aa65594f5c68b6069e13a51de5 +size 1291060 diff --git 
a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png index 62b97979d5632f6aac4bfb16fb6850b60cf878c7..b3383aae80099a8ede08576d67760f1ac2bf6cb3 100644 --- a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png +++ b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a30596a7801baf7bf97072f7f26d5aa6d57d7780c9b1a053347be878532aedd -size 1531478 +oid sha256:64283d18cde0b578f4fe6628576be3695f886117d4f15828aeb70bc9a44cb8a9 +size 1645537 diff --git a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png index b41bca30018d73e583d5126dbd21d3b8af1dd624..792041484456bc5a54d268803f5ecd27d2619a1d 100644 --- a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png +++ b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8eb3bd6c1e1f667a8ddb624028622f64f596bdc387e652f8b3d3ae219b7c6ab3 -size 1527183 +oid sha256:e57828afd6043bfb551710088f9fa499f4ba20158597e310242b3892d0c20abf +size 2292258 diff --git a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png index d981c127c23b3835c600eee4825de4fd16645f75..8c5d07af57c2066a08b6703a735900c0b5411a95 100644 --- a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png +++ b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5327723e24a2ec031f9d481bdf6efacecaec71532e3ae5f744b657931cdcbba -size 1155893 +oid sha256:398ecc92df82f47a578fc4ffb0f530eef1aa8423e429a7bc7726b83bef908288 +size 2067740 diff --git a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png index fba0a69cdd2c4a4decd4c3c40b6aee30cbf39010..797c668a58579dcc5b8dc28a32e0bd471971c6f1 100644 --- a/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png +++ b/images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:681d14ce2118509c67bae5235b2db1571838d699a8b2a39fc89138da954bd351 -size 1422330 +oid sha256:77bc131e253ab85c82c64eeef353451152630bc5338073b1fc2ec585fe5d58d9 +size 1272425 diff --git a/images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png b/images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png index a4c23cb0794b7287cd2dc294b4b7dcc80708da18..14530e3060e0cf049a9356b3a5ac1c90c113c4b0 100644 --- a/images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png +++ b/images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5212cde296ec8816f045d0c33b6da6542405dc30147fdd5ad56aea27cd64696e -size 1146328 +oid sha256:df015f0b0b1a021215e9b264c0fc32acd0911d5cf2e5515d2a50623a53d69a17 +size 174365 diff --git 
a/images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png b/images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png index 7d4862d262023b25e529ffb054d098c5117b5931..da37924aafa6daf1701292c9ea807b704db48f71 100644 --- a/images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png +++ b/images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5dc5213638609587f0cd936f0c995305b8d66e6111afbe8de33356b48d4ed2c7 -size 3458590 +oid sha256:6b4506b7744c68340c1fd0712079360e84f2ce57965936ab507496238676ed3d +size 3269965 diff --git a/images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png b/images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png index 3b9fd9c37efa4b78e4d5d26688ff4d0f1f4292de..24e661faa7ae4b3fd52bdc352b26ab88fae80260 100644 --- a/images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png +++ b/images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfef6b68627455f66f4c5c68c3c28610f8df4b1243c9299469c8aafea1592981 -size 3204475 +oid sha256:f827555e324652c8885e9d92ae41bce231bfcb6658704351faeb62b029fa3cc7 +size 2179711 diff --git a/images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png b/images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png index 897f1959496298b57967457c6f8d561b094f3fb2..78f16d52e7a7dc197cfa4d328c34a11b4a844470 100644 --- a/images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png +++ b/images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4c08b1cca5e362e58987f61282f1f450b6040f1aa74e288acc09128abf988ae -size 1092195 +oid sha256:fe6d84ce47ec46969c9e6dc9ba0834a5e8ceb57abbd018d68925c5004451b533 +size 1345089 diff --git a/images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png b/images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png index ca31beb22e37539923afd51e4e8c038ab789be80..e187de1b11f1ea193eaa9a1cd30a1e639f0a7f2b 100644 --- a/images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png +++ b/images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a77f384550c6ef45806f0d8501b6839e529644ea40b462b390670b8c50c40fe -size 1712667 +oid sha256:33a19aca4d58694c8b6b09f5a24839203dd30762e30be3510b6c497e1cea20e4 +size 2001254 diff --git a/images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png b/images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png index e215649fdb60a75087a01db597aad9a2f644977e..fe5341d929ad6c02a59558db0c5d5edc2b429157 100644 --- a/images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png +++ b/images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff5bec235631e315ebfcf7cba5495976ded2c6d7377ca1f320ededdabc467c46 -size 2053916 +oid sha256:00cca584b30a0c4f133327c287375a9f18d869b0ca5ca7a7ee89475ca9f35f17 +size 2148815 diff --git 
a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png index 191cf936c3a2c229de25928614252d5a31c7b0d2..52710754790d2487ed0c60eea17dc9e5fa87b014 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7108364bd30834a91b2f6820a374eea8ef9ddf351f45c2126f0cdb98ec3921ca -size 1198552 +oid sha256:368177818c8a936deb9c88df193883318ab883df1c9b1a16147084b5aca67821 +size 1721152 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png index 43f04a5546f7381bce87e69b5e5cf66662f4ea71..d99c024959b150994ccabebe8908b18f03f343dd 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52bac39adb9d5b3035e174223e759cdd44a7726ece991b1ef095af4bf6c59060 -size 1180631 +oid sha256:f1bcd5e0056d7c44cb1de0146bce7b57ec57ef649bf1fa0899ac37171998a257 +size 824794 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png index 4f3eb2a5f754075d843c58eefaba263c78e92d9f..f32374f39aa6ad885fa685a8b65e735f9f86d247 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af7a6cfac89604ec421d532d86d92e64124e5f60e619e5d191cc17f7e403a373 -size 1201235 +oid sha256:e814faeef660c7b62ee4de6e7a0be28e9791e411274da84dac492d743c785736 +size 1797456 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png index fb65f7b08cde5f9d349361b06271d4362ee25971..217d9df48b358ee08700671293066233fcf93382 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0117579a6f150eee0f4957843bdcac92700a120220238d77e2ba0d454b800712 -size 1272142 +oid sha256:8d15ac4919b2073d503bf82e1d4e5257c85326773aff2b5ab8f5515c58ac30c6 +size 1799481 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png index 7260b5024562ddb5333484085c6c2300f4ae191a..abeb7803a3a478ec63fa705bc46a39984c9c5b61 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ac208a3afcac4880f16fcb3ca2320c629e5bb7709ed59dfea1ab8caca3a9685 -size 1209651 +oid sha256:e0abfa6bf3df18158f090f48cc26aa51fb366e85e7aff0a4174c48da01b584d1 +size 1876323 diff --git 
a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png index 668f564770acd5d60066787110dc64880d4f5334..5ac46fe8663be82de7a217e9e7adf69f547a934a 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d636dd0b8c5082ef9d00a09d087107a1df900742bab27ebf906459db727f71c9 -size 1253474 +oid sha256:fe4ea44e83693d24937c2b916e4703515317466e0745271087ceb97f0ed278a7 +size 145624 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png index 28fb29eb27dda00757b2e7c95e3a4fcf9c4c1252..939605145f399e8a89395a010ffc662babf7ef5e 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbdef94e64070232cadc1fb687d05c8bd271ac861e65c8a5cb9d5435f514708a -size 1272436 +oid sha256:47695deb1cf1d8f1843cc00298a7adce57edbd40e9471daf3fec6cb0b9132216 +size 1889317 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png index 54cf7e3fcad2f5513a98532e1b3b3f608ea51355..4f30d2a6d04a45f0a9a7019ba47389264ae36015 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dfc521b4b939e97cecfe5a5cfaf4eebbf8a45842532e997a58b599252557d36 -size 1272778 +oid sha256:58a0c421baf3a541bfde13732b918e8cce027c3907bdf73783e0913eb1d3bfa1 +size 2068050 diff --git a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png index 143f15b4c08ae9ff5e1bfd52754e52b8df9f0292..3e4c42224c23c3bc6c9c0f1dbdd8ac2cbba99c00 100644 --- a/images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png +++ b/images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:421e28d962c1089543ad498ea0681d8beeaa1f3367728195aca84c32b7fa32a0 -size 1168197 +oid sha256:25d9bff05099bb9ef819be54ab2af0cf74bc45872f1dcdacf854d6316b4d19a4 +size 1488309 diff --git a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png index 6e563e36f0b5980c95a0885a87aa680f30c2674f..c27857c1fd17e9d2857d519d801ff5bafd1da924 100644 --- a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png +++ b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42f26294eb095ee4290623565d49a717e497c8df7868bb576e690128ac78d8a8 -size 1836696 +oid sha256:6fcc2e05e501543083783c8a27ecc52f5a49697b9188fae728a39c39c4a771c3 +size 1644349 diff --git 
a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png index ffc99d3250868a326eba158d5ececcb6a88491bb..e21adafb1a66d9accc66905d2778a0ec33b602ca 100644 --- a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png +++ b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf52ee3d1f23c9359c9e4062c6d03641b9143c7201c5b380dd5a3e2563f6226a -size 748876 +oid sha256:5d945ced6d175c03500a9a743e5557550406eacfbdb8315f580c2fa5802734ec +size 740981 diff --git a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png index 0414e277dd47b2fb5762887bd0d0dddd04a9d7ff..275f68b6fe1009552b9349f90beb29061599d5cc 100644 --- a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png +++ b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d68c99ad6e0a10c3e7f8e62c3587fd03b3c6b254b2523c9e8c483d954ee01d0e -size 744837 +oid sha256:439eb257719518bab0d6cbd72a299330bd47d6956fab12e3edb38c16d341b1a3 +size 604266 diff --git a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png index 9fb16330eef95f8b14c4dd6d9923af10684e6ea2..d10cd3cc3a34e574f4ac932fdda81658808f1840 100644 --- a/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png +++ b/images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66dd7d928b5b4ec2c660e1b0b0bdbc97ee3dbcd068a6813c6f4aad5294aaa9d4 -size 1850788 +oid sha256:b3bf54e385f311a33fef509b932be0e16993e5a72982d9beb0bd7be183ca6128 +size 1529968 diff --git a/images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png b/images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png index 9e01df3e051491ba91295960ff6c11487ac11283..2b609c94e118d3a3d39414ef74988e2f31ad111d 100644 --- a/images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png +++ b/images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:614c2940ec42f3e1b348f63b3736319ed2f4a46c2457d541cbe37bd3423510f7 -size 1264012 +oid sha256:792387cc2644e9b8d138ebaed6bd344f0ea893bbf2f23de3bd6bf03eb7616a7a +size 1251860 diff --git a/images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png b/images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png index 1c86a5273526b4ddc12e47deb618f5ed65e273d2..eb38468365762520e9cf4c5087f6777907ce816e 100644 --- a/images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png +++ b/images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f24cb7efaa6036eae505760eba385ed6436b2fa9fc6bbf2a9fd748c67752d4d2 -size 1122036 +oid sha256:64941a51ee98fa66f33a635a8f001cd45d2281913f9cc505f872e9904b2613ff +size 816195 diff --git 
a/images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png b/images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png index 9efcb59292c6eec45b01136c66cf916fa000e29a..d5773c754c3619507e767b0fce428ef0e298cc2e 100644 --- a/images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png +++ b/images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e66798a643c6c7abaa830fa212feb129111f3c7c15f4349cb84850d784d28d8e -size 1824459 +oid sha256:9194cb6b6dd97e96709c17faf4e05217ca406b1d691b0b56e5f7172f80e6556c +size 340521 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png index 56bcf8d3cb5614f900ae9417f3af4c7716b82fb3..2de38e3cf48aebe51d32d0090a0e841fc5f9e979 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fd144181e75d3c26b50142b75f63ac01d941b1f51772555176d6ef4a9788865 -size 663348 +oid sha256:91b64a3fa873f7c9a98038fb88dd93b53ccfd108fd1b72567d081a97109e8782 +size 913078 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png index 93346ef4310099fc53a7c4f7c89b6428c405b59d..e0f3dc528d383cee8135848d8e6d9ab03a75f122 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42cb53b1aa3703dcdface958b37548d68f1e2055c1a6b6f02ac7b1925c42b297 -size 815008 +oid sha256:92dcf136cc97c1d67ef69a8df2a34f0a3531e17c47ecd9581fbd8c2f80fd6aab +size 630227 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png index de3d19bfb3f1dc8324d2567954c86fe9d283741b..b2b6b8572aa2ce6f8c8d8ec646eb6f1e9aa9d743 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28464dfd627b2595f6c40374ba69d137285ff3cf348395fedaf60db94588e321 -size 655757 +oid sha256:df166ce9cfc0755598972ce1398853cc71e42ceb247176f46f64cf6300130533 +size 949132 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png index f83d4968d54cc6589ed2cc13be56ca33cdfd0a9b..02969af92fef5dc3291d22478290b61ae91d281f 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2f1e9042d277935f55d3e8c4675d2293ca0b2047f880653cf62b8fb0bc909c3 -size 631430 +oid sha256:2014fe6446906dee01fad16497caa0119df7a27bc6614a0a1b2e3b207c3e4142 +size 615666 diff --git 
a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png index f38261eaf5ed7ed6b55bc2176389d0bccf83213b..f342c3a80d0b802ee3e0800287fe8747aa3def19 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d5a7584ce821546ebda6b94229190cc785abab64627efa3e272150e14636028 -size 1136732 +oid sha256:6acf4156a2b1a2d1fece6cb7164f8c57997df887a1dae814c2a5cc609f11c4f8 +size 1253964 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png index 05bf4ac42208ed5e21007148740147d85e299e60..88a87ddd9b096fe5353f7a159a9eedbcfc17b5b3 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a64e806b458544642f2c87391c1f1a6dbc2e5565ddceffa88e9f780b6978fa6c -size 1372110 +oid sha256:f7de605e0d943a5f507620a802c1ed5e35de09fe6213996642b7a544963ea277 +size 942173 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png index 8cb971f3786fb7a9a2c404b0e0c87cfcc1b505b3..6b4452c1d71f36baacee874f4fd75ec395a1a1e6 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5cf4d10cdcdf5aa6f3108f5aa375becc86d0cb451008b25a1180a9c015a359a -size 990241 +oid sha256:0341f56068b7fc2a720fadde72790080b40dedf9080db5566900eca603f52daa +size 1401336 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png index 1db2c49706118cd2159cd86106fbb2d1ac4daa96..d1072265568f25610b5eb9d2833e3404b5f1b237 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5d8f1fdb29c78b4e237ffaa89f7bb680b8f5c599e555f691cf76ee5dc4957b1 -size 1049397 +oid sha256:73b79526640c72f5e41656ca41c778bde7b0a19edfbc028e160888129a3a1951 +size 1215626 diff --git a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png index 162c1797669c4c5f202606d47bdc5575a48bde9d..7b83c15e6f3da6ceb9b3a7d4979dcd8d84814b7b 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:662cdb9898d4fed247a2faa0039fd17ec1522fb3011e8bee62112d71c60459cf -size 1003937 +oid sha256:1c570bf9c20d0d8fe2944b6a8778bfa66c3524eadb80288fc1fc4c7ba66c6a4b +size 1013921 diff --git 
a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png index 6061231a160b8759231f512551553228b9ad703d..ca1d61c3d20a3f2a3e8041995076aa70f4e3645c 100644 --- a/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png +++ b/images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26be16c8648d4ea404145ee7108c8058328c196ef292ca0bca2852b425985dac -size 916777 +oid sha256:c86c912f70824affaa63130f9a9956bad859c501f9cfe74f3b8422969404a23c +size 804842 diff --git a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png index 25042f983990e5c3c5ab7e43f0ae2b01f3da5d63..b0a9c4bca4f6a4b53c367efbf49ca5e6c8ae84c3 100644 --- a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png +++ b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5b431c38d97d6f729bba36c384b8bbcfc27258559e55fa72c8fab05fcd14f18 -size 1020565 +oid sha256:a1f981de444aa6f2a370eb533cd9db8aee02886d4aca8e3ecd4ec13d6d5cfd84 +size 677696 diff --git a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png index a9e565d62f8d3558ad3758c72485ab4943ada779..73cdf3f79f2e95bce86144bd9be9007d39fbab16 100644 --- a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png +++ b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39bef42737d1cc8a156cac84a9538a39e385635d97e1ffc42c470075360f8ca8 -size 994067 +oid sha256:089bbe806a4fb46ab43db27fdaf2f147b07f9bcf23518f97db99a60bdeb2de57 +size 916523 diff --git a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png index e98d14c5d377ba7f0d43f50acf8a98c077777882..8da91a3322a572fc6c723dee13c27c1a90a6cd4b 100644 --- a/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png +++ b/images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27d3ab21dbbae2ee6a498cead53d0325cabb054a33843814509d5081aecda2f1 -size 469319 +oid sha256:c790681c62800732ab34c42404cc71a1530cf0d514c19ed8a66b33e539e34fb7 +size 405587 diff --git a/images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png b/images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png index 1673e989519989b05ed0d375187edd21ea08321a..7801f5e463860f22b83943ad9761aa90560e73b2 100644 --- a/images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png +++ b/images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54afbaf4cdcca13382a94f7093dbb5dabe09d2e1e1e229c6548699e24a0663c7 -size 671098 +oid sha256:99a459deda827f545a9a2c5f4d8be7e18523a9f9043dc28d8f1ef2f041e850b1 +size 710640 diff --git 
a/images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png b/images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png index de45a9773bec14df50de752a68ac84183f662987..94345141cedca3d11b9074a71ec508d1383ffce8 100644 --- a/images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png +++ b/images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:742da7858ce484b22a09c0a9b87f418058dcb14371decd3f1defe736452730d2 -size 846607 +oid sha256:26f44f2ec5ff9d8fbf15397ef9c3453d89dd6a01e9120d75e26c4b17e545586e +size 988317 diff --git a/images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png b/images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png index 72d803274f9f9e889471ff81ca75db5e1ef928b5..6064bfe349d269928ce1fd38218d7406e805a87d 100644 --- a/images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png +++ b/images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1eb98e52106a002e589e1a403766a5458a1129bb1498b097492a7b4d8f277aa4 -size 630139 +oid sha256:61a1b3ee9fe0ee6c07c9ebe72ee159df9be4137b2a50008f31e33d74d295f18a +size 763685 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png index 0cdb602f864c5d309acf8a7c93642e1eb754bcdc..3e3a1851f2dfdf0170e559c612f763b794571a45 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ceccb5eef6efcf6877f38ecf6e5dd9d761cf96226201a1d4eb85fa8234c121a -size 676019 +oid sha256:e7baa2a66bc6bf58e99d9a491a6ba07b33978cbf94f30b67e57ad4a4ea7517bc +size 964217 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png index 07db9f9ac07bbbb7219fdb3f4536e33cc33041a8..8347d3736e5893fe819e2bbeeb57314f1046305e 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b157c1d86ab0ff10c0939c38064016bd92fbbfb6ba2f2173de480495ce3ab8a -size 683186 +oid sha256:68035b5b928ca72cdc8f987f69963ca655adc5f4c4d24f93f0a54522742b5d56 +size 876651 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png index 1ca0d093adb39cd12b5a7845f0e3a02f382f08b5..091285dd984feeadf8a532f07e2cf81fe888f03f 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef799685e1585da06f226fa3b1de0fce03e69c45d008b5827031715b5d52a632 -size 576557 +oid sha256:bc30c8e3a7414b0d8786beb21d859e1c10355debde3c102360e2c9a258896d79 +size 631019 diff --git 
a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png index 26f24434270c9693f731c6fcfbdfe3fd17277e4b..781eec5e213ea64c46757021f07135406dd0d9bf 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d82efbed1fac0634d643c5547509fb657fca46a1cec453c047e51304f5276cb -size 545378 +oid sha256:c1812dfa93ef5caa49be4c49d4cfaf5ffff1f414d435328db3e8fb099a932de4 +size 619790 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png index f4a3ecc822f6dd54bbae99e80711221759528e96..9a7031f57433ac7f93a4d617adb6ed1d3ce27806 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:909a3ecc7abe1191efd7c8bad2efb0b1e8c3c65fdeb7db1dbd3310a2a21d3d6a -size 641757 +oid sha256:b78da900dbf13c9c21c18341c63366bc0fc038277117ab3999bf19f68f8d681c +size 642579 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png index b54fdaca6230897ef81585c53f069298cb587626..6e85e832df229febf643a03019c276be9f247b8e 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2edd6ef30794b8cc96141c3f1bd1d04acb0ff5a28b2b7000253ec0e9a11aef8 -size 631857 +oid sha256:5c023709d46cc8f879dd537ca8da606def841b3d95e3c1b33e092e6e58640a70 +size 533531 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png index 2cd6f245889af252d2865fe9e10ec211b7978b91..ee94b8803f1242776a9a6582055ab1235623eb85 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc4080de62ef1f02d2d44cb6966335cc2a6458dc237ce5e7058ec21ce3aaf568 -size 1405177 +oid sha256:36797c0d3fda408e863dd73acee81b91522c4f9a3be1266f3bc4c67f9c644418 +size 972487 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png index ebcb4a77580e0f2b31945f113a69241105f6f131..a4664f91438e215f1fb9e6c6cba187b9d4f5479c 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdb8353356b6fe1f413eb76122d916c6ad67d4a4e78e1e4ffe9aa80c3b9815ab -size 574711 +oid sha256:4a38e25537b22c14db245ccdd8d4245f170f3d7adfee319c73a0755f03e58b44 +size 642329 diff --git 
a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png index ef273cd2f2b0ec63950d287e111ba95025329fcf..8b9a78cdc6d402bbb7e96f9e077837e8fc0f939b 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4abc5453df90524a842ae2d056cd8ea065f822d0735ed17370a20c2101119c10 -size 496728 +oid sha256:d9c6b387180f695c042952b76e0b512c5db4e988ab75c7bb5cdacb7b664ad6b8 +size 740967 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png index fe32cd51b6e32efeaaea94822bebd3480f27e079..b8f3136ba2ef3513cb6a2ab7e816ad73a731feb8 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f2dad1f203c7a5c62d0121ff98d478ddcac4f693e3d4ade15fce342acaa4781 -size 1041861 +oid sha256:02cd57a253c74bc9a5b4fb245e58354d16a0bc2868dcc854314f3b3c76e77a72 +size 1621929 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png index e2e0b647dee538ec4e9b0e73651f6323c84ecff3..d95a0d5f04f800b34ab4c09a93f9a24eb7d27112 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7765106c5276ee2753226a03be125b07e98d3a90bb42918dc8b0f183e86e86c5 -size 683391 +oid sha256:f64dab71298604eceea020a2b2bb04028e409bff60add6f806b747ea4311effd +size 736225 diff --git a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png index 249c9287e2a65ae08a99906363f035502cab7861..677a7dc0943f458174f959e4ceed3af8e4ff1bff 100644 --- a/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png +++ b/images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0bca07f987d2748e4ed0789eb5860f74e5c7796d1505f7fd98a7983da96ff61 -size 631111 +oid sha256:022dbc9e6ba977e3305641a743c7676b135191bbcaeb4754c34b74513b79568d +size 539547 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png b/images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png index 0fd4aed3bffdb9d071ff050fb9026f76c5bac5e3..0a840fee9f5f725746ceea73ef5070c5c19f9993 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f17f8713ab0ef22222b7cdf1a9afa178ba53ed3c47370fe9bd4b4937f288055 -size 1528114 +oid sha256:b03c9387415ddc58448c0be0e80970df89736ac9efb3da6f77a9bf8532845a4e +size 2361987 diff --git 
a/images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png b/images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png index 6a559d4304427a265e23b0c30b8e543550604e58..87b47797781c33726efb6481649695a92b542782 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b755b2d2e8d4abbf71299d1e9f64f8a80070c777263b53b78829239f8f8c19f -size 1156868 +oid sha256:5ee5a38320c82c41fa82707d7c59192afd27b225434a74e3087301ae1e28293b +size 1991104 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png b/images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png index 9a6cb2e4c7c25711ba82a97f7c9c78e9c97aae61..28916e1952f8ba98a9b0f43db2c0f7b049d36d4b 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53bf5a61737f8fe257ca615604cffc52bda11e13ca23df7a2da32373650543e0 -size 1151353 +oid sha256:17eee78a184928abcca41dfbf2c0d19848f0c66bfa8f853b5ab5f46605a0907d +size 1700646 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png b/images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png index 5bb62e3df7738446a88511dce9cd3ffbf04a468d..7cb1730c60c476a7da6c32d1c3e0c3b6dc11ac3f 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:308209537465a34a863936d1a8d7c7c5eeb4434dfe541f874e4e7477a00764a3 -size 1230664 +oid sha256:70990350ff77ca7c0b50b9e6899f5f089c215b09f4b53d331641027e53c155ec +size 1231701 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png b/images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png index 5496acd7fcc8dc444c9417bc80172a5db58cd7aa..88ffc0c9954725c50f49f5bd457ff6baabbbee4c 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e38be34150dc546e43b8f4db0de3c6d82925d654a73599b0a74255ef0c932dbf -size 405867 +oid sha256:c0fbe2056f84e7c58f3c7c8c64fab80a43ee46c8559575a8d559d6946027252c +size 389739 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png b/images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png index f6bce564769280cf267caa5eb185bc21efeca766..0b48d8c2e21f08e444a8863d34aea10f3bebd2d6 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a588c818e1fc9e63bb5aaf2bc4572722587799ddf6ec472e58fb1bd7174c1a3b -size 1109046 +oid sha256:53caabc256a91483acf75379e636f41f63f8279fd250c92452b68b1364f1cd76 +size 1883154 diff --git 
a/images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png b/images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png index edbe796f026827756d925459603f095016c9822c..11337bbf3a5feef7da0fdc2c345f2831239cc2f6 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a1aa6add52e3e31f3620701b5b91215530acd4dfee659bbc6ab53e722478eef -size 1220754 +oid sha256:7e82456dbe476f09378751e92c08bc9036c4766636ca1d52ed2da552a70bdde3 +size 1899021 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png b/images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png index dc88c30349b08ec08f53392bfb72678cd9d7a8ae..d51f57db61852bc49311522370a1dfcac4c3ccf1 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:117de8eaf491af7e1cdee1747efd519eb6e4057e75455e7ebcb55544260248f4 -size 1242720 +oid sha256:f9b3e3b0b63c695b03125740c82dca3cb05baac42a92a143dc133a79fdd1525b +size 1722502 diff --git a/images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png b/images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png index 7765c0bebe2d946b14cb5c2dd78544991f6ffe98..30039d418f86f64d379865d1f56b6c85f3d37404 100644 --- a/images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png +++ b/images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36d95540caa13f3e15c8e84b8608212591fa3bcac8011ac2ea9ea364e4a9b9bc -size 1541958 +oid sha256:38b14d87980d08abdf11b3a0ec80c9680a52eedbb3c32f62fd66c06f896d1004 +size 2361914 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png index 29269f2bc2634c87d9537a88569175ed55eb0ebe..de54f56e634f84770c5c5b56758e1e2da3b19f30 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16b716c54bfb35f16b26b7a4020fca7aa499ab63acb4fa33626bc9158d8abbef -size 616270 +oid sha256:71084a8b8dfcb080969651d0fcbe1eb248571ee077dc49ab96addef07f511934 +size 541936 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png index e1c7157467a6c8a35f56fc45290ccb6774832d05..438284e67d3ec965638de86d71d18763f1af8460 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9648fb04317e145d9310576b5bfca8b028634ea70f1f7890ac9475ef7e5dcf66 -size 823232 +oid sha256:ba55de10a80b9aeb33a0556d99d626953216f4d7b67d2fde462e1eedf7e1878e +size 369154 diff --git 
a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png index b02802bb8ccdb36169ce8f39d7835a922dd9a3fa..e0ac045e663dd498c02a4c2b06198148250e63d0 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98a994acba1381e957052390d828da8f11d614d0de3c0a77beaa2e07eb7433ce -size 321686 +oid sha256:cce536cfcfb5805b147cd2e33b35f6a7e156fec353e5747fe8c5638779cfe08f +size 242754 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png index e2ce5b15894fa0d8735eb131f73575101be2e4ad..ab623e571f8e975633e4fc6ef051cc4bf791f4c8 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f5469a0c7bb1a79fca395f71378ed190ce2e16ed97c197850af800f5d1d7eb5 -size 553889 +oid sha256:3f163af210000975d4b05a0b4a9b0a94e0f5ff561efa17b33416828322000d50 +size 1302513 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png index 5f296ae4614b68edfb37a15511ad17f5f114984b..b2c027472bd74499361650cac05516a8bdf6cb40 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:056df314e550730d07d1397dcc49be19d324a2a3c1ecd86051c3a02ec1260430 -size 911666 +oid sha256:a7e821b10ac17e7e6b324611d6e1a0318325376cdd6475514cfdd5dae8e457e3 +size 751726 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png index 7c3679820c12237fc7df24a8cd58f705e4ac1591..e46755119bf98d889045af6db2c0528311029e70 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:609c47380a179a8f64e0f1b9ae4278e892fc5472e6a103841dc1324dba5b7868 -size 630838 +oid sha256:db31a6e9736e260047e9c01cedfb6aa06b6af8dc8d93d39840592244d4767f41 +size 862206 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png index f3152900ddea24a01c36818d8251ef4f2dd741aa..ea98c751cddf513961ec3cc8adb00c06996e831e 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:330ef2c9f760a14519d104ab71b19ac344e6b1f5fc74dd8f50fd2a474019ec53 -size 703614 +oid sha256:29645fd4d1da7cafa61556304f7353646397f44f63ee1aeca55a9aeade9e6c79 +size 852462 diff --git 
a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png index 828a15076628ffdd407137c6c4597d3d773032b9..1506f4329ca9227f32eb082010b250a6cf697533 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:897f8b99cd5072967b07383081038c2217d2e581a9a69b701a13a28b7af4bd0d -size 826390 +oid sha256:9ca15bf2ed8019b3a0d4f3bdbcad35372b8ce7a7b0660bc078df70035bb6503f +size 1052268 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png index 2695c077190e8176a7a5f302a09b910e4805e1b1..50d54e26214ce0c6936d6b8eb728db3f5b799cf9 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f8c1e2a9afa3fd2b0ff9dda721d4716f39e4b3d768f554c2089d45ecdc83ab0 -size 1211398 +oid sha256:2f355839302ad466de8a18a642af0843bcbddc24a363870be8e0723bdcd64661 +size 1264552 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png index e35d825eb5335dbd9ad370de1715a2204b73ea10..725461ccd657b4e49da5e7b3db89ee51f42c8144 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9b58b7f6fdf7452a5fc46c8b55ad628c53c5566466d257506271bbfd68c51bb -size 747804 +oid sha256:4f60ddbdb67451d67e75034131d4f7dcec3f667edaf3b3d02ab9d51180019317 +size 1540792 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png index fda90ef91d6e0382fd926a2ae842612fd74f5186..f66f0953456831435a730e4a3928cb2898b41e7f 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d235edd93174351e64dbcf4f59190f05fc5094da6441218b4ff91d6b5c0ccd87 -size 816920 +oid sha256:e6a3b57f43c3b7d484c72a7a3e532fd2d9163648275b6ee749c6d746979294f0 +size 731556 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png index 1c28a3f52791b54c20b5e533bde4076a91467c9e..bdd94f9201119f6080756cc8eb334d310a530020 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4125dad66756d52503075658a9e401b4d8e86d1095aaf2dff9abbc92d5fc9399 -size 1211842 +oid sha256:ce1fdd64e8e7e96ac9598145b1f8b9ee0b32ffc4363775f6c88b7f1c4fd5beef +size 1165421 diff --git 
a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png index ab28d10cfabfcd83c06870b0dcf963932eec5230..398b401e3ee9b966b84230fbf9b4dbb8eaddc018 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:031466d5cd0884c3904a640b2e7374d211cac555b987ada16e032f313dd9d970 -size 567002 +oid sha256:a2acebb5cf1aed8998a70aade29c86446875959b5a90186a3521950c55656df7 +size 666846 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png index cf5ec20d4a838f500ac98799ad9fb571610e6b13..4ff9a5028ddc77f5fabbfebfa9a938c4f25b9aa4 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05fb0b8a4753527d84116713a8dcd2348834ee17a6ca6f31555ac1501a07b9a7 -size 826391 +oid sha256:b8fe0ab034bb5f80eba1f7bf5c3f02ae6952b5bcb3dd1bdc62089e4573b05841 +size 498999 diff --git a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png index 04fe5a10d314627747439331bc416a27582c3aea..69f34914fbc15060088cf749fcb8c857ff5cdf63 100644 --- a/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png +++ b/images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:798093870c3671f9bcbdeec4a68d861e0e4d08118b043b126267fcc21a882664 -size 842148 +oid sha256:b4e860897dad77f2a0854ff81e8369f4ce85f8d0a4dcc059f33567f7c7b48882 +size 1075007 diff --git a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png index 190dfed5ce142b2a40f9c4d2013a5f5776355440..79eb2e9dd25751dc554929f43500d22d13e55e62 100644 --- a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png +++ b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcd088daff686686a8cd74ad20764eada35694342505c4a978bd19c7301d0b72 -size 629873 +oid sha256:3d5f9107be4de1f3c143e9311cda9c1f1485aec4e3c4929b0a98ba6a7f4062f0 +size 808289 diff --git a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png index c25afb627b29dc8a3f9ab1d55b294d4a3749f7dc..ab4bc412e4d4e9092685468176e9f0e843d475cc 100644 --- a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png +++ b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c43f28ea0b5a0d1b3fb4c9e1ce4d07d95ed9b40e13c7b4f79b9af945bed7d2c2 -size 637056 +oid sha256:8782a45c33d65f388979090624e5f77d277c4b06bb9b027088ac770fec9395bc +size 689030 diff --git 
a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png index 075c31bd4675ae04896ca1905c962e88adb34d51..cb3664affa3656bc6f46333e8defb0f46a646de2 100644 --- a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png +++ b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70388215c49bdc539f07e21b7509b90e34a83dce0028420e6ba4452b705ba446 -size 1046985 +oid sha256:c5c3739748897f1f0ee8ec3e1992ce749a86e18f58ce5ff06518c1e00db2cfde +size 1429241 diff --git a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png index adcdbcd3a72a6daa2b5babe36b4b3f8d545db1b8..b5b3b0fb29154d066b5421c37e2f5d365fd6d5dd 100644 --- a/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png +++ b/images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f355b1afe62d4fb7b73700ceb4a2c4f4b4211d683a1645febbd4acc331966629 -size 720018 +oid sha256:4a18dfd4f9a33047e56bdd07581c9efa0e4e49803a83bf505539a34c979de3a7 +size 1167880 diff --git a/images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png b/images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png index d280d46e5a8cc3512035f5400f2a962689bb65c8..282b3653ad3e9df24d2f21d2ce732726d8d21c32 100644 --- a/images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png +++ b/images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3734f2626448f146831c6be54b69e88609736725c98c42b5c5fc44ffce7851a8 -size 173846 +oid sha256:c54c2ce265860b0d66db95e9ffde92972316bc39a737df0142511bd300f3c91b +size 191010 diff --git a/images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png b/images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png index 8a9110da4bd003e5832f2bb52b672c5659f5fe38..fa0605e80c93cd3b097f6fd0ffa8d1399ac60ae3 100644 --- a/images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png +++ b/images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94d26aedf18a86e00dbc7a8e94b594d7492d3bfbe4fd06de452bebd8dc99c2fe -size 355220 +oid sha256:97fff89cbcf6cd30630eccc0113790cd9dbd67deed534784d5a3184fcba6c8c0 +size 432420 diff --git a/images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png b/images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png index c26c3e904f92514ffb71b110905f9c0bbc75f29a..2f2e7335a18a5d922de0d4c4f7325d49edd32af6 100644 --- a/images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png +++ b/images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74663617a75bdded6594ba285444aa80860dd02b9048e87e8489354530cc79f0 -size 1073970 +oid sha256:3af53b2b5c93c5a2d9526807655b9cc243a65bb6d52834e51900ce5588d3146b +size 476055 diff --git 
a/images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png b/images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png index bf8103e218259be66f69f27126c04e1295dcfcaa..d8507a07356f1d613c35916c7e696822e8cd82b6 100644 --- a/images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png +++ b/images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7dc1804ef4fec2c761497d121620ca81d7c28eb3f0a558cf9f56253828e2fe4f -size 479167 +oid sha256:8f0190385ffa9c6186b2ddd5362103fddbb2cd19f61f3fca35cc171d0efb7480 +size 618782 diff --git a/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png b/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png index 2d9a1b150d48b05697c2a161c1053b02e704c143..7eaca529293f5de406d5d4702a2f6450401ad674 100644 --- a/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png +++ b/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:372003ec4e614ebead040d90a827d72b8ef179ddd788a6f2ab2285a59119763a -size 1367134 +oid sha256:b2899099cfaf1c3bed55468f2a058dc088791fd80bb9bb4e85522ee2d0f64b43 +size 777745 diff --git a/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png b/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png index 5a6fef14527a43b36cd68e3b7c32d40ca34d1fae..e8f91b9f63842ac8035a7b6be51de043eaae4660 100644 --- a/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png +++ b/images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5eca37c9bd5fe984a8fe5018fb028ee3530d681b66de6c9fc2b1ba572dc9a2a9 -size 1283658 +oid sha256:419e437e693346171a14c45697b8a7257e36c035c031781ba6f2d9950705e7d2 +size 1799640 diff --git a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png index f72bb63062d75ab82585f51e9ee7cf1abdd67aa1..f7b04154c3c7617dd134ccd36c84947e1a4d5754 100644 --- a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png +++ b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec4c07d5bbb6f1da570131fb3bd6d63a0d0abd2e3d509730c6ae7b3c1e37b3b4 -size 1896167 +oid sha256:7300b17e72d1550aeed200b54e900afa059f3cd8bda2a2e7de42ad821fa4df9d +size 2068463 diff --git a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png index ba5e42ed0457ac68e14278a0ba3fa30820f1f606..437499cca0b251f7fe43088af0b00826f7c54508 100644 --- a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png +++ b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:716c4b7db1dee3f2d15a78692e2a841256401259bb60e02ddd0424b58e495c25 -size 266402 +oid sha256:e5ca9449ef7e6cc02ea76bf677a399697c1b8ab4d977d5d387708e5703249278 +size 315864 diff --git 
a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png index f0caccb7d0cd47af9a59e4b3d034b2c2e051eda6..d557a8ea81e222a9ea8a3ae16b9e8530b905b5b8 100644 --- a/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png +++ b/images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8f2b8dd90e692fcfb252d2eedcf0b4fc6acfc2e1145eadad3eb7e2d1795d3a5 -size 756381 +oid sha256:92e965c5afc09953841e9fcb0c96f076d9ae49f16a3f9802be90bac4b5163ebc +size 657988 diff --git a/images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png b/images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png index 8885188f9cad0e6a03e8f09c2e3106ad242ed079..9e9f29bb9b0a6ad7e1a3d323b318f923b035a6ae 100644 --- a/images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png +++ b/images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc57448df1dcfd4805fb52050040c054d246946c69dd7aad76d7798a9a233342 -size 234325 +oid sha256:a6c0d5187ce3911d43764084c7623c97a15e4977f167ac8f81f2f7a3e17d5a6b +size 234273 diff --git a/images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png b/images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png index 059cfe474f855ba8c5a3d21aba8206f5236c6a28..531c028ebabbc4c72aed38ee87af700a28dddcde 100644 --- a/images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png +++ b/images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc6e90a69a16a7865cef0e9dd346b0340a0edecb5bf458a5a2f1a800ab4f23df -size 1510248 +oid sha256:9b02c65509fd74238509c8e47e4674983b4686472b4ce58da581dea5b2837b91 +size 445533 diff --git a/images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png b/images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png index 71f9a879a2acd7e96caa1f047359db91b93f32bd..5bab003a108d9afa9c94112606e0b4e93b35acf6 100644 --- a/images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png +++ b/images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebd3d2d02a0597c89c5abc4947d551a82bc31e4a5456e19e8042ae2c3ef8226d -size 484692 +oid sha256:07bcc22613d7e3843f3b94e945799e3b20878b20c3e994fb693b458b547bedb0 +size 481814 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png index 998bf1f1813d78545383436c88473b4c3b43810d..f7c61b834a62dc0f04df7397f7ebfb591c03e88b 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7b8bbda435e42f194007b1039abddb82ef787755c7ba2a60a8f575055e2d4c6 -size 349010 +oid sha256:ec4ec5fdc4f91081084a10b7c8f6dfb96bb9c04a1d40be90ef22d87f0bc2adea +size 296222 diff --git 
a/images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png index 1f1b07d1d5281151ebff49a029eefb7a4e6e68eb..231dca10eac4a32943fb41e01b10c06980c9a2a1 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba865c0649be5db7e130b376b3cc4326185479cdd047b517840043d387e52ca1 -size 328056 +oid sha256:59dad224e1f3597db2a0a72c7018518c5f4fd308a2a3fdd5ec840e5c52040383 +size 268228 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png index 98f5b269c364fcbc4083db9eae66838bac53262a..634107e8c298d0af25d56fc41084828ad4a34e2f 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6665a6a3d4b9f6a6ee1676de65a7a4fe06baf2156700ea039b1243c2996cc584 -size 726246 +oid sha256:33a0e3536277eb23fee97e70ecb5c27cf2cdb4213417037034a9ec53ba0470d7 +size 667939 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png index 46b08eb8a0ca16120692fea247e23ba278bb83e4..a276f4a65aff80d39ba27dc455b0c3e7a98c4918 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72ebc2eea2bb15c66196a46be782603dbc01f33b1bc48f52d1296d0347df0d2f -size 311451 +oid sha256:cb42f4530a263dd8217af559b562fb1880a190c9c7913db65fd3ca9e8abfb5a1 +size 220293 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png index 76b455224804bf312709df0ceb8eb0929c243a9e..93b3c02ebe83a55625b1d42f5cb6dc9589efed04 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8926c86a152b2f3d0fca5ef808da07054db338757b4529c9d75a78afd32f23b2 -size 728342 +oid sha256:842f0ce515241ff97f6980d435b02ca43942f05faa3f952fa6842f468deff2bf +size 514777 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png index f5c347898f1f47bc1dda51d2caf21bd09c469cbe..6bb5200f33e357e1ba6b285c46c383a86d82a60e 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68aee5487472d7cff581f9673df2ea20cc9bba54a13a7fc348eb41a289451ddd -size 2243540 +oid sha256:7af609e8a7dfb9196d437f5326cc7ca1c09db220878b3bc20c9024b8cf6220e2 +size 1122380 diff --git 
a/images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png index 71da328c609b74a1f3afbe93300eb4ecd711eeea..431845d120e134b40177569cbc4b98b55e9a5fae 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:705907d31ab25542e0918c281ddd41d38f43436dd4b25e506d1dd50e38cd9c78 -size 302035 +oid sha256:9348d6febc9f3ca49cbf10ff61dbe315fb97754c43b5d37a840c18a261eedd81 +size 154804 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png index 157fd99318174d33d343e084f8f21dcca41d5ce3..41d088ea2d57ba43e02cb3aa5ebdba276b148596 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4222c97b2319b098f5f3a56ce9dba9c667ed0e8affa631efbf09de25c83900b -size 651511 +oid sha256:56091d5af6775488ecc706c8bfa9b2aab1732b9f0bc4809e8380e6f2fd44ecd3 +size 616476 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png index 54dcf1d7248cbc9d987ea8ba88700258ee906c4a..296ddaa2e2e80e18b7f50f21f1849ec653ca87f2 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b886bd45e5239b035c46ebd3fcbfeba889201acc1725176b52926793cbd3b5f -size 583815 +oid sha256:9f9c1dd6b3ac6abe4526c9a14d85f78d5dd5ddd8513891348356a683bd3a51da +size 256497 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png index 1e92716dcdd2a1d1c44d48737342501ecff17a10..8faff9bd7d6d575779ce78e1fa7aa7da91bb5689 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9e2bbecfe720577c4ba1125d49dc8b52af5050b2f00556ddde1b3a54b020ca0 -size 410740 +oid sha256:f896ac82a388efde4e4e16535813a5b0329f7b6c6e42188e4e90d9df9dfcd468 +size 396719 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png index 893b6d07a14c89a36afe238cd51a005f77988f15..c16a54359b88724323d6013fe8aecb2b1aa6c9ca 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4293eb87e292cbca574933b99fb7425dfe1a0f6e8ccba686258262b84fefb291 -size 583303 +oid sha256:1892524d722c9641823437df5e77b2fb7a325826f54288773dcf3c75a65cacce +size 864127 diff --git 
a/images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png index 4b021ea634184deaa3a586f026bbf8ab97f1fbf8..d3fc7f75793625bd4dcd04f980b2fb7cb670cb1d 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24cf7d15cf39c84871a8cde0bdb755d398f1954cb841f0fd46ef921d60c40eb7 -size 577455 +oid sha256:bd32619c822a417a7c647e403d155260b9cc9f83af567abdaf2bf8a7d4b91069 +size 446917 diff --git a/images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png b/images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png index 19c88e2fc863286c69e61d6208a739494d95053c..5e785d4c1638c10a52cbf990f5fb4cde3bf4822a 100644 --- a/images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png +++ b/images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3a5c2dd7b9b8bed74198985dff7b1ee75f22dd104c1452d571a87656506bcb9 -size 605869 +oid sha256:c6bd7dc4d669b99b09060349216ebe01fdcf1f9282182df110b2d982ef54a145 +size 1008966 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png index 97be182aebfcc81e3ddf752f5a441dcdbc3e8603..071fb489fb2712bd8a036166c4c57e25a83cff61 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6293b1289236e52cd4538da60ddd581c4532183de410028274c5c00ef5a9a9f5 -size 983550 +oid sha256:76afc68c1b0ff6e9d3711f0ff1c70d0dab36790adbba60f2eef26b777cdb5099 +size 795712 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png index c830ecf0cdb9d3939dff09d14c966e58b566000a..daed64a70ac18e677eca07a280e64e542667ce3b 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:554d62c13f1376a7f2ff9580f9279617ee0ddf3c6cb9774e25a654584ba7eec6 -size 923982 +oid sha256:2cebb61f6f214279d0d64baaa35556a8b6447948df7e1c36169d7ec95f03a996 +size 860939 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png index 19ac2b882d0b5424055c6fb1332b4e7fd79b08d9..646dd8c9acd32a5b43f58744a0c4056cceaa9a96 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:367d287fa751af212e640fea15e293411d70f20b5ae083365ce06c7f01296f56 -size 757782 +oid sha256:a1273967dc9dfbb93a42ffa32f1c1d329559f95e379312e05910afd45ef3ae3d +size 707064 diff --git 
a/images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png index 5aada582e2939c13124a2d496f64a65e3c1837b6..bcf5bc66436ab3cfd24534e7f10087fa8da7a7db 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74100a81a183c93ccbfd9345b724baec213f3e79be0223d1921ed98caaa721c7 -size 995737 +oid sha256:00a1cf32e822926dfbbff692d2f3627ac6cf726c1c5b0e9a828247371d45ef0c +size 1567849 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png index aff215a14773ba9b4986950bfc6d212e8568bdf0..17344fca16ab14d1b4def8dcce06ba73eb2edb96 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a00ac0d4a21a5a5dfcd8b4ffbe6ca1b7559084c8500b8c316083548e4d750dbf -size 1017444 +oid sha256:34cfb8110889db99b616dd10b089538d8039e172557d672ed7df9f98d218d8c6 +size 1171226 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png index ee91feaf2aea93d44221ba77edcdcdd86a4bd567..3bdafa0543d9ca18d4fa62a2b47f1d4da5ad11df 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d755c7eb378ca732c45e8d6910fab9f79342ba16b051af1cfa1ddde729f23f7d -size 1018620 +oid sha256:b52b44f518a46fff8ee27c2d4e9723c2f1aa487f5f7649cca1e347b0ff6affef +size 924064 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png index f2d314584a4a2a399cdd17bebab5a52b5aaae2d0..5d1cd8bc335cbbdf5c42bc08493ae188290afdd6 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e6e412154c1c8d760838deb19d6f0f990bb3c8c704b6604af3344c919930af6 -size 1032396 +oid sha256:1924fbee771997b6c38969ee895f5d4c9b66d1d12f1172e730e789e20d7786fe +size 1237172 diff --git a/images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png index ccdaa848fd446905ae678ef84eb3fbabedbb9b11..3322d69047301f260921609ed138be262ea6bf85 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f82e2d0c460340febe54f8b2467cefcc25ee8bb1e3a89c4abafd1303dc87aeac -size 1169656 +oid sha256:d1d93962a8e675b4232e4dae369a495ddb76dba56584e830a74e0d73a2424485 +size 717028 diff --git 
a/images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png b/images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png index 54b9f28a533e4bf932bdf14dbd0e14f8b1042dab..a4209b7d376df8ca427addd8d944d6a60cd51aa1 100644 --- a/images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png +++ b/images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2de4da59f6003616b3898c37045b341e3a46254ec8bf7e330050c8ec611572f8 -size 1034630 +oid sha256:ea998b06007d97810580ab9392918c2038cb7352699ee52c150f0d3549a605f1 +size 921190 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png index 8f40b440e68dc075c3740d1a664d4183b6c1547e..cb2cf398139b6d9ee0db34599d1b281fed71a04b 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05f25aa670fd1eaf8982190c202c18c7f8bf1351ae42e098d0587aba4a4a500a -size 1225081 +oid sha256:309ea6906468799e279f96baef168f161bd0eeaf669eccb57f803d88ebb2759c +size 1420657 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png index bc38eecc12948bab05983ea399b668810d6ace06..473956b828b8daf7d9839ab93280af80f28821fc 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eba2fe2dca00dfbafe794c61dc006eb9824f953a5ffe966fbcf130614b209f79 -size 1190573 +oid sha256:aba4784b9fda4a9093ed612140dd16b0d20462a6ef77520145d78e145ed2f9c7 +size 1536412 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png index 05e33a569c598df11f67ad99033079d959f144c0..2025b2e0b84307761e068a94925da1c90d8a6367 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:387f9d0ca8d99a81c18bc03fbabf6e869712797ede5b5fb0d387aff13fff6a46 -size 1179403 +oid sha256:d377055a94b713564c462b35031852fde1f2443fc10572f89523452f67e5e175 +size 1426015 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png index 423d7578fc509965bdd163a7a394f3fffaf478a1..23bb77c03588a848facba2dfda47ce3f65a562b1 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3324046ed7c3256fa9779d352e72bd8e50a65e8e62d584a07d23067a76646a7a -size 1355692 +oid sha256:7307d705e9f76d32c8dbea9391e6bcb5fd3254fa8382464b657d34338debd677 +size 1551229 diff --git 
a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png index e6dd157c444a87972419732883e4200262ecb1b3..5e96f870affc9b42d7fae5e1f11cde21504c2ede 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14fb485da9a871b2bd33808348570efe11d72142af9a7ac10c6067d523a5de9b -size 1233793 +oid sha256:9ab05b05afcf4cd9e8787f17682de0055b09d4d7fa61204ff97cbe58a4a8b72b +size 1579475 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png index 1326dff32f4d6da33c3bea70426f3411232eabec..3fdec52e3336b7a5d1a20a6885e0eb73f9fc7836 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f69cb52b4e74a40d99e762acfad46ab669ac0ac6606a7bf0efe4dc317c51350b -size 1186116 +oid sha256:e3b7beb93ba7ef728d081dfdaa843202e37b637cfe0084e9a3a319e173cee85a +size 1273204 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png index b369cb9dbcc79f6c8cbd563854ff1680a0417e94..4cf0fb79add9495dc92b8c349c94d11bbfffbda0 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cf75e3b1e594a8157258bb8ae3bb58c217ef545620511707068f31098c7c066 -size 1215979 +oid sha256:c1a7dbaccc8fc4d8eea21c7ea41d8a6dbad851533949b48ee79cfd3a2782bed2 +size 1063598 diff --git a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png index f4e772baf5d3a39d6a0e80018b3cdf66307896d4..ece0f64769c42faa60912aa2c3e3301d796e37b6 100644 --- a/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png +++ b/images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e9f0f59a457a730b4bf6ebce4113302806b9756949cb2bbf07c10ce02edbbae -size 1349925 +oid sha256:4b8a932cda1d5d64d2e96f81bdfa6e3550b916d1fbd880a039c0bd4744d2eb29 +size 1258548 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png index 086d20476eaac269008c43ac0a4b763544019c85..a4eda009c58eb09896c66ac05c94b731580a5705 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d883280172d8a27096b701fccff311add27fd8ecd59452f29b1d14a1a9204815 -size 265533 +oid sha256:5232115bafe4885a4e262175e0eceec1c9ec636a47de5f057c1241655d422ca1 +size 553232 diff --git 
a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png index 22c686b8e40469f87b0a2cb28d99fac72126ca80..521b6b32c5984c9a13afcda9253f4b96895f6f9c 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2e9117c71966d8f5b3a9c206e66d6432ee81a3a452877cbbf6b9de942558fe4 -size 268373 +oid sha256:c5576fb54f59fe4a1ecb86f687fb8828f159a639a67973115a92c52bacf7e758 +size 410962 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png index 67497a04cf731d3df4c63d67fcaa63e463ec85de..dfecc2e1a1cbc4c92c0637c82f42c29ddb34a7f5 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61593c86917009e0a6824cf125d57d0f47bf730629ca831b3b856a8d5c92b3e2 -size 1606847 +oid sha256:7b72700b5abec021b8a459a167cc2429e7f6bbcd65b94dfa4f6b3fbf19af0a27 +size 1572378 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png index 752f5e6bfc89f6d7aac74c009809c8b7ec9b11c0..78bd044640c0506fbab65c103aeeb6d7c8d10363 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd0c8ac8a9da71852fcd68c44959fd10d2e46bc5ab692b0e419329a1a2ff8911 -size 494665 +oid sha256:06e4b546814396af2d48bf32a145bff92392db580b36dead42b35018c0e47e3a +size 1117873 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png index 97f9a6a492f2e451379aa3c77e0be4991ebcfcd0..971e2f0049e058d299ecbc539f6258cfd2ab52f2 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6be33002861c7fce9a9e422e1cac93936a17306b5912d76fff36ce9bf041a332 -size 523648 +oid sha256:3c4e5fcb573d87b8181301a80e704aa0764bbf69bdeceb3b74177bf14d0ffff9 +size 622701 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png index e225ca5101df7895aa1cd3d07a317a12fd5c3075..473da48db077ad8fdfa2e883eda8e6270109c512 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d2d9d560bd3bb6a3b8482c33eba0556e5fe43b0f14dc48a0391107fb866ff2a -size 805920 +oid sha256:6396fae484b6e6b05efa1ffffea94163a4cf3ebc5d8dde90f7f646e7ae45fb36 +size 1037315 diff --git 
a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png index c95463bca4d3c3bb97153b7049619d8c057ba12a..fc07b8ef3d39f4f308a399d8219a2eecbbc61c88 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f25e04e072c2f85e7eee662acea7cdd8a786ca5156061ea6c140f0ed429dbf5 -size 237067 +oid sha256:de6331ffce13103eba275937fbb12368684a241cfea595be76377a044d82b10f +size 237794 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png index 526c04cfcb4a02f6e2eb64f3001a1d8261dea142..e904dbef76c96786ad40d41cdcd8bc5b5884b30f 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4e40d35cc88db20aaf7b676d8964cba6dd684c81cb60001464f846529614f97 -size 793891 +oid sha256:10c29ffca924bd7e68608dff129673bd4a374aebd9c18bd6fe8a3ddf4ec1e703 +size 556343 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png index 00421126b8cbc518857fe51f582e26f6d0bbe60d..bcecdc06441b02d9587ac3fcb6ce6ca89f5408be 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4aec9d3ca72c638f076320b61542fac43d3a50f9b9b676886f4ea84b9e78708d -size 805741 +oid sha256:7fa15302dc6fc68d2b7f154f74649b445119f7dc217c1545ab1d8c7e7f4162d7 +size 1037008 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png index 00421126b8cbc518857fe51f582e26f6d0bbe60d..dec4d719e55e952c3b8fe93ef13105580c47b051 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4aec9d3ca72c638f076320b61542fac43d3a50f9b9b676886f4ea84b9e78708d -size 805741 +oid sha256:ab59d995b19e15fc2e7d276384131aff3bc645f94d5b480fe119ad0d8c753772 +size 1038662 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png index 1825719aaf3bbeb1ed052c352e2c1bf199f40d02..6b499108e7e9eb0f8591c2a4a171f8fb7f86ed95 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ff3a02738bdd0e2e7a37d4460e7fd90e5d14464f6ec08d7157e95d9e5df8acc -size 226595 +oid sha256:487c0a67e54e74708700215b6367ed72aa6b2119630528e645a2d08e5007db00 +size 531555 diff --git 
a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png index a48a81300f44d2316dc140ee36c75380fc3ccef2..93a6b8923d692a66a8510b065d46fb2fab90d148 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88f006e77d760f51d8889a4050ac4c7904a82211cfafea567802b53c9266fdcd -size 437807 +oid sha256:960323970f67ac33e241bc54744cbb3c1dd1c6622624aa6e1b45e58600534f28 +size 418075 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png index 681bad351475adafd6b4bad37caf4c46d05b0d73..b466dfa48a71655951a83929a629205720bb12d5 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b66e5f672f3b3540531f98db24162c05d54abf8d31253b82dd97feff762e9f81 -size 804535 +oid sha256:71c939ea3e1f6199d1ae04910dc505919ad22849eeabc9b2ae85b07b7525d49c +size 756550 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png index 44f59dc544baf2f5f4216844e12d747eeaf1b9a5..0b0c18da5505aecc65ea205fe7fd09f6fe1bb249 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20178a60eef5ed6d958b1840e996d365bd8cee1a9576d786cc56342edef1e0d1 -size 210066 +oid sha256:cb3528b624744b06dadcd0c71d71f0f8d547071aabbea2543a734aedd7a7a7b9 +size 459291 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png index 41b540163ea2a83663b331e880f356cd2ca088c5..8b4756d816671caa829e3f8d613c31298c36a03b 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:043013a3b8572331b0d346c18807c83d0a6a2e6649e5f65345548ce932880984 -size 532221 +oid sha256:6f2f548401f16f92a463bfe726c873e5be80fe6885be5cea8f8ca78d3f1f2fbe +size 1097734 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png index f9a218999236a623c35ed9b89ccf4a63fffc0133..e349b72324c8720fb8e8d6b588ba94126bf6eb23 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e7e1feeb24b87c0722f3517846abc0a62a87a5f67caeaf9961f08da68c4e992 -size 1988032 +oid sha256:b3bf493f1b07f59a7c66a8317249bb45396256fc597fe5dbf84df54870269cdd +size 1617018 diff --git 
a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png index 681bad351475adafd6b4bad37caf4c46d05b0d73..f5021921cf6c77d650a89b2682dec267f15d8fc6 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b66e5f672f3b3540531f98db24162c05d54abf8d31253b82dd97feff762e9f81 -size 804535 +oid sha256:9e8563d75899cfa4a4dabcb8910e3df136d7a84d13027be3b03dd7413c561b54 +size 637938 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png index e682c952959ee6b120452b02f299fd702bd4f023..a25ef5d9a02f0a44180045da25f66755140a2ec2 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc2aac55f6e6f2d674a107164b3d160bd1fa7118da9b88fd4a5baaf2164d582d -size 537190 +oid sha256:f68b4beec8e1c3ca197cba67040b6b5e4996df63efee381a5d67102a2db403f7 +size 523225 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png index 8f0603ec484bcbd25d7a33e091c72b15b618c82b..534170dc8ae1bcad07a45a3c56567c3cc8b1905b 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1d2c75150ff5f544f73615f602938a799c8060b28bea78f758a70fa0840398d -size 2010497 +oid sha256:eee7c7ebaeec2bd8caf5a8e396cc730173280b7b9c0247734835d62e19635662 +size 1058713 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png index 6401b9686c883a689b75b08cf115c05177786480..e3affff68b6766fd6880b5ca96a3b3367f854fa5 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5424d975dea5db4edfd22d2b04d99f1bbca1b6b5ff637be84038fdee05510ee0 -size 228897 +oid sha256:ccab94491a9522ebe529ce682763848e191dd4b65488ee5e3be5aaf639b5c61f +size 556537 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png index fce4f843bafcf4c7d35086f2c583ea4b86b40a4d..646173cff8ee252995a2bcf7c4c17bcc9e20c477 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:945cb32b1b2936bb3565fe33fdafd083eaa24b50a727db33232713b3f8e62b99 -size 198961 +oid sha256:83cc0d6330030ccbd84966f147b12ee236809a6bc5da6b273ab542fd25b72b69 +size 173164 diff --git 
a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png index 705569f797d81597714752d17c7ef220e255c327..c71b2ddf8a45e336850cb4aa929b65a27aed427b 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7187d40eba556f789884b77fe3d21ce8eb497758eea0ff45b019d52d0c3fe5ca -size 258474 +oid sha256:09c2dfb8e053d099ec5ec9f7935e224ec42d5512df76d23851b78e9bfcf6b2f5 +size 554636 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png index d49c90bc6c757772352ea0b4606ca686f2103c8a..1aaa8d6b964982e6f3a850d4d2a96c95d59f8ed2 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96d019db57751682a31c8f6ee3ab4125f378c15af1db3e9f7e16ba412d9ef9e2 -size 737197 +oid sha256:4134dd9cda2a1b330fb8ca0d9a03996081ddb5a7d02995ea4c6a21b5fde144d7 +size 505997 diff --git a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png index 7625cffab27d6dfc887a6ae3b2b8d0ca5f8150ce..37f3d55910685ff2b69c5d5a5bcabf0ef1c06ab9 100644 --- a/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png +++ b/images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:735e74b3a9ad3a3c792e64f043372a621405552dfda0415bf89fc5ab7690d5eb -size 255107 +oid sha256:c7855ab4d0e7fa775d3eedfb57f919e4c40954a0adcd13369e92d31c15931ecf +size 477525 diff --git a/images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png b/images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png index 8ecc50fb796521026fb258a1883c303af02562ae..dc8666254f2cd4a65fba6e23800937393fc2f02c 100644 --- a/images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png +++ b/images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c14f114ff1410fb9117abb8684ac53a50f3b217deeda0fc0dba127d032c061d -size 675462 +oid sha256:c148e28e1f9bdbf7c7884a831d8802d922598df77ca1733b840d5654c7968002 +size 746911 diff --git a/images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png b/images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png index b25206965cb68f6940355f83bd5f43136d7e5e81..8f81272060da043dd8618fb40ebc80f94b0687ce 100644 --- a/images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png +++ b/images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ebdb6e8b8863df0a477ae6a578e548f218e1c4ee3bed6c49221026f5aeeb35f -size 675415 +oid sha256:1bd1bdde9727677851222f151b57c9b442d838c2ca7587d1be95a802b5f4f994 +size 1058578 diff --git 
a/images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png b/images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png index 3a5f0c74eb16887c912a6d974aae23a8a4400ce2..79b082e2f7f540b8558331c41e63192e3c0cbe42 100644 --- a/images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png +++ b/images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8159a0aeeb10fb3853443ef6e0f716905eb4841d988b4d792249bfed5fc55213 -size 713804 +oid sha256:999b95a5143cc6991cf04fe2b154e20810c3a4efd3ae66fcdfe09ad8ce305ed5 +size 744462 diff --git a/images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png b/images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png index 4858794eddfbce2cb214315a2b3097d5389bd43c..bc6cd4368b484f453ef28afd77409eccd362d7b1 100644 --- a/images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png +++ b/images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a511cc5216b6feea5e8b64da25cf2a2991ed8364e1f64045430eb26e144b69ae -size 543062 +oid sha256:00cf42fadae33563abd4ca7969a53bbbf963cb9b2114aa4cf79beb07e5633969 +size 1050762 diff --git a/images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png b/images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png index dddff4d382ebcb0f652898547af07905189e5365..4d1fecffad0dc68e0fe0f243a62a917958afefe7 100644 --- a/images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png +++ b/images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:190a3fdbea94e079c67fadd1393c992d755e6b5a094160d2e5ce4a8c4d91d8b5 -size 548072 +oid sha256:445071b229754c991c28f44390b91a21bca5788703bdbb3d0d77df41b24b66d6 +size 360804 diff --git a/images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png b/images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png index 4858794eddfbce2cb214315a2b3097d5389bd43c..1e0a47ef87ca3d687ea0f44e42df638044a974d5 100644 --- a/images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png +++ b/images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a511cc5216b6feea5e8b64da25cf2a2991ed8364e1f64045430eb26e144b69ae -size 543062 +oid sha256:0bfd44978ef78d385e2972770641e97c9188520cca3da1aa0643fd4304ff0048 +size 739424 diff --git a/images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png b/images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png index 577befcedb265aad0a44a535466151554733edd8..96f270d1f71cb01079a7a3e7d4891b0185c20002 100644 --- a/images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png +++ b/images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c36996a8062287d8efe4d58a02939dbf2f9e87842ef5a3f03ebad0ea0a38df4 -size 479818 +oid sha256:a9ee6a952be6f2e5074c0870952e249677ff393182bafe5765c0442326f5b229 +size 624696 diff --git 
a/images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png index 962cc40ddf7f4f02f875b0048cc8be1a8138d877..b5e92786cfb4bd7e5524cab3d58cff1baa90b5cb 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23264f31cf05e0309287fbaa66ec9b914d4ca59a2bfdb7f633f06e86e6b91c6b -size 1362955 +oid sha256:9abeae5ceae6e2373e8b4abef23498633e3ff121ec0534c2f6ef7085917dc64e +size 287077 diff --git a/images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png index aed8972666f3eea8a22a10f0523d712f225bfebd..49b7855271605bf5158c7f04b07e26e704336d39 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bce8ee5142626b891ea1114704754984f1b08ebb1e21d26075633ba095f83bfd -size 970667 +oid sha256:ca4c8f9a1b477ca355fa1bd2d59becded6ea9d57e7d27657b66b7b7a796725da +size 368862 diff --git a/images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png index 26fc2290a6bc83799f1827a5e4514f87b84f621e..1a5385cfbd7e57620e571b41911b826627dafbb4 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e339b16b83ae2f15ee8953a9886ace1538104b1616aadf240b56065e066d9584 -size 970909 +oid sha256:83233d4a05fd9a488ae369964e7aced31e0fd8ea960a63eb296f59e51e80f3f8 +size 774456 diff --git a/images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png index 9fe0428a4b2057d847dd814b62d0ddb344fe005b..b716f8dffdbcc9ac0d64fc0033d53e13477017fd 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba1ef59320e6602992f71fa571d61e6fd59353d5aa1ef5e41a1ac0abc5d0a968 -size 939002 +oid sha256:dfe8819e65e9d99470df41cef026821a8b59e1335f0138b30a5fb9391d956e52 +size 477768 diff --git a/images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png index 8ea3b85a669f857b1c3c1ec69c4b6b52ad6fec40..e07b902373bbd7fda62d58fe1615bc0d54a4f456 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9779b6ab76890741eaf532e5b439302dd7542e52bf2ac64bf46540176d2d18e0 -size 1047672 +oid sha256:6fc94ad7e8ae3db419d8b761e070d077218936b3d9ce0730b76df4c195261a1b +size 933465 diff --git 
a/images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png index b4a4804c4c201a73d9e6cbb72e82def126ad519e..8e672f1a4a0be170b8ddd7d39b8790b75f98b2fc 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9f2b21c431fe50238240e55086b0050705f687b130187f99ca1c9e6ea2ba679 -size 857315 +oid sha256:7a7584dce025883d130f2d6d0f41c78888eb48411368dc239a7e7880b7a0e421 +size 438688 diff --git a/images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png b/images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png index 2f3c8c72784ce57b49ebef42193554b963651646..6a3b72160d7669913da2eb96635b426c81f5b230 100644 --- a/images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png +++ b/images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28c3955c3159bd51a0db689c87c359a4177191e466daac997e74a52b8d321468 -size 467383 +oid sha256:c554a5f58017e49fb4b21a7d69369ea96112a16697fde8c573ccd68772f87512 +size 370330 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png index 23b54b81e069667ca1a177d2c92d3fd1062f9168..ade2a9df80853d5059b2fb7b1ace557afded041c 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df41c8cc7e4aa4aaa197414e2299bf97b19a5ee6bfcc9a2f160169978f133723 -size 1314593 +oid sha256:97a8d6696cc038989b556b604722210f02d18464634b641086cb6c044d748e63 +size 811883 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png index 825641ef81c56ad719a6e779e663d99b5c764034..102887a62ded7ef198416e1ae4981946cba96ce7 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:662f217d1852af7fac302c4d73d41b4b54a021b3b78d80e2af784517434444b4 -size 1355121 +oid sha256:a3cd469394963100458f625cc1ed9041b84d397a4361660589098628e312d43b +size 927858 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png index 52dd692bcbd200322e9c019a24c03604453a4c64..59361bd59c66b7b4eb9678faddf4a85c01dfe923 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb613f24de22d34304ac9c02532b2588ae0d73aa08ae1678f7b52c47e1d5cf44 -size 716757 +oid sha256:2d49de8052d0c72edb8f73f9ad176325294ef766bb48a1ca1fac3000fc3bc0d4 +size 672268 diff --git 
a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png index c5fab9dc89716f0e5f04ef3bb61ed0f73de7b4a0..dc64723cd0cddfc4f673aea3729f44849e4283e1 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:932d78762cbc3449c4311cabd65f70f3e48c62214911263322f36a3914f11029 -size 546435 +oid sha256:b69e913e4039950b805f2fd6e9fed7a63fa854fda7b2f9329d6ff98ae8c060a6 +size 444683 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png index efd5a2d518d938e988315a2090cf636d00b3553b..97026914c433c1007d9121467bf957e391199a41 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01ae986b4131eebd34be7144ddf4040d2a9d3bfc57001b388c31d4ccaeb9307b -size 959144 +oid sha256:8217879d44eae72ef0ea0b20f9f8b4b672aca9ad0864d7d993686a13818e6504 +size 773707 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png index 7362e67e780e85fb6b7678af8bbba2688cae96de..cfae994fd33e8d8cdf588cdbd385df5c6ee80cc2 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa94104f10864cfc791f3c5550e777fcb9f8f3e0ef4cf313af22daa36927819b -size 555745 +oid sha256:8f240ffe70bbb863d12c56f88f66510eb3d0233d5a03a0a2bba2a300ee42f946 +size 476529 diff --git a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png index 7d0086408363d6c98be06732f89c6021e4306bac..8219b133ad75acc181cafed08f499ec59ebd6f55 100644 --- a/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png +++ b/images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcdc4938882fabef1b30abaa44dac24ab8ceddcb4561f0fa1f47788f837e22fd -size 1079177 +oid sha256:b0f3adea04cd39413dc04719138d6b15cc089f2fda143b24394210f537f4884f +size 1038464 diff --git a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png index f3cc5265687ca01cc86e98edc8cb7cf5012babbf..776f2baa9edc0fdd59fc1e0315846deba9c866a7 100644 --- a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png +++ b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab44a6971813fd487614012c6516cbd22ca73f41d15febd9c30f17071789161e -size 2147842 +oid sha256:d6669e78987174ccd2c9a810aa52692cedb0c09207afd9407ba7ed02a2466ae1 +size 1815561 diff --git 
a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png index a0105eddb261245093285e73338a2280dc9dd786..95879233ba345479e68c0ede09acbdf237df0f92 100644 --- a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png +++ b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52c8532f1c69b682b3054f999eb8b28031eedbf90e34a278cec026a0761ef2ea -size 2332430 +oid sha256:36d0b26738d9bbce9178128376cb7fea9c1e9af3850157d16879080ca4973a6f +size 2059781 diff --git a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png index f2cd4ca3c6327c146941b610f6066920c616d25e..fea14e2078914cdbf07ccb0264ae9f84c68ee181 100644 --- a/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png +++ b/images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:540c0c5c7cafae5fbdcec09ee6bc3399201051b9dfa1b1437909e1aa78f7fa89 -size 3714559 +oid sha256:6dd8454b41301973316356cdf47bb32af095da4859c797d92617addde122cd29 +size 2645643 diff --git a/images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png b/images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png index 8bd98ead20b85e4243117b0a7090933f100a3826..67642ebd02ddd0e25ecfcb9a9fb1ca004c96e26c 100644 --- a/images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png +++ b/images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cf1653968296facc6ad6aba2abbb5a548db6ede3769662655ac9d962a804172 -size 659712 +oid sha256:828f531de5c7f60527b24b9c3b888191ea55508c3e80fca983a8f17fe94b10d8 +size 783075 diff --git a/images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png b/images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png index e55f45ad2e7e558f26988b12adb8142a84b69cbb..a10c6c072314fd49a51352e0c8d39f96d89b7374 100644 --- a/images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png +++ b/images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c2daf15521a97b72f416cdcc721dada0d929959d51b0dca3f49db349711f9bc -size 740301 +oid sha256:44003c38c4beb668ac567373df97fcbabb92334b0bf8cd65f57190294da97039 +size 611316 diff --git a/images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png b/images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png index ca20310dbbdd0e72ce436cce3d6a64302ff27cde..ac9f84c65007e1e8d7e226036043ca5855330577 100644 --- a/images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png +++ b/images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0195ad851c9dff2b1f41d1a0cf08c1047f924376ad3bdd80a61f185aa7bd5ccb -size 662049 +oid sha256:82f6d8375609ff7fbde721f2f1c3469b2eb0d72b6a33a44d750afed2f083f012 +size 1179391 diff --git 
a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png index 6d83b1982af45d19eb0f1928deb23155f1880d9f..c9dd4b280ae9407e16ef0933125a1394f0ec2c79 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa473712ccbc3107afe04c7f88068596bfd755e2469f95db9b35764a6e5d5fb4 -size 341443 +oid sha256:8bf393940e119350688741619586ede77d7c2ffbf1dd782b718319480e2c170c +size 344265 diff --git a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png index 64faebdda188c55ddeca9791cad17071922e6c72..d801950ffc29b7be9937dbe346948e72e3aa01da 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f748e5ea20cadcedb02d841d31bbe006cc97d8eb7ad32e016ab458f286c2c57 -size 1588076 +oid sha256:1994b02b5577e061c2eced7de020666956631712ecf5cfcb9b7ff06c0861d2e7 +size 1706235 diff --git a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png index 9522d17fca2323dff139d78b7fdd8cd46f76f426..cb35e7b1f92f96bf9317697066d2d2af75ff4675 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48ce014f64d624941b8fae08e8384f03b39de108aca3cfb7457982130759b4f2 -size 358933 +oid sha256:24ff0a06ce68bc7950eec62330144a8d4a6984995aa7b88941c66e3c943edbfa +size 361324 diff --git a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png index d736e388ee66c51864608d03bdb5bee95ed1c527..28f7d6add312e8100876bc762b5e90c5f04bf091 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:282638385fbb3f675067723e7a8296594c0000d006f21a8add333f44050bf381 -size 359571 +oid sha256:7bda94bcd4ae267c194bc174c2b8b17a0813dcbb90aab9368729b84637e01954 +size 361997 diff --git a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png index fbf0f905a5e27c114336256f988e709273cec009..e71e1b1e3d7e5a91765f4bd7acd630a0bd3c8d0a 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a597ebec15478ea570c575fcfd7090a502db8cf998506bc586d19ea8d2ac01a -size 345857 +oid sha256:f17f4c0ba86699451509ee8bf47cd4d7ea9dcf41247f360d7db9fe762fc330f9 +size 349101 diff --git 
a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png index ce77f82cea5b5db93820dbd8a82e951296a02692..37de6cfdf9cd645edaca542367462c04fb94189d 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c239684015f6a146ed45cab5d1b599d912b4c8aafae9c73f18e875bbcf2b2d2c -size 338766 +oid sha256:e908eb232f95a128d3c81bebf748acff08212368267ca5940425c7fab698b2f4 +size 341530 diff --git a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png index 25fd22be7633584541a29bd6f1a0cde79eca1391..dee903c3becc00e33c4e2fa22ee12d19eb8296d4 100644 --- a/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png +++ b/images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8babf7d14f84761ba6aaaaeda9fc9808774562d979ee3c2d988bcfcfc328857d -size 415528 +oid sha256:7feb010e3d5b84ce5f8f3134f4086a6eccd70e6b8025cdbd0e0732bc15043141 +size 546905 diff --git a/images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png b/images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png index 1aaf33cf648eb19fdc431f0454f85fe90ca68c08..2f2d0a55bf48ba54bcd30cc78d1ff0f9c29bd5d9 100644 --- a/images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png +++ b/images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7ea8b996482e131e3e8a16c93ec8042f6f7f9640e5cb01cc37c53817ee6fc8b -size 1311689 +oid sha256:d01bb706fb3661885ac57a6b06ebc67804a5b3508564ac8413914defea657bb6 +size 1379407 diff --git a/images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png b/images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png index 1069ad8c70c5067985b5b078c517aedd453ec011..0635abd4469248abf6d8c57c1b6eb069176c313e 100644 --- a/images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png +++ b/images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8541f7c594c99d70a93d0d172f1e80c408f60e2b6f615ee777e1427c2dfdf844 -size 1382143 +oid sha256:1987b3a4179e7990f39c2668a7f7597bdf4e7dd316b77dd407facb964520c841 +size 1291064 diff --git a/images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png b/images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png index 4105a582f4a942713c5125a3f645507bd909f72d..d9c812e3140c3388a9b775c5a26e68542864945c 100644 --- a/images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png +++ b/images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55319dac2026a1700ed645e6d2e919e226cdd080db098a24c2554458cbca1727 -size 1175855 +oid sha256:fd1774cbe0b660379ad6a5ba5afe1307184e26bc31b99d870c2343c30c2acfc5 +size 926424 diff --git 
a/images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png b/images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png index 3505c5a122085126214e7a9d49d8119bcb185d91..5cfa7219caba212662c803245547224b2a8070d1 100644 --- a/images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png +++ b/images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:974ed10daab52f4783d763c8196d73c48c784111b1a3da9f3d4c02d61cccae5d -size 1342061 +oid sha256:aa79642f44f9983b587c51c01c37b4ed9b0d7d0919eacd5aa59cc192574c6697 +size 1405311 diff --git a/images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png b/images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png index ef0e2d851810a30295432dd85dfedd75bc5ece7f..e0bdf82bbcdef82d19f54add08e79e694167ea4b 100644 --- a/images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png +++ b/images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38b93341569bb239ce84a10dd60a87e18eb4058157cf4b75230afc223e809356 -size 885032 +oid sha256:9c978e02773d55f5846de4902472051c3d34b8189904090fc43c3d20dfa8f55c +size 886272 diff --git a/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png b/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png index a884bb6c4bc7b390d9dd31670d2a31a4190a8905..c1727cf5b0eef69d33fb3cf695ad01d7ef7e120e 100644 --- a/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png +++ b/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c702849fe101b83bacbae09e38e5f52faa34e35a6256b710bf48f7b7ed8ee96 -size 1330402 +oid sha256:a8d8a5a5b0b1aa7b30fbc6bca991394d2fe9853c513112d952cb7dcc03d201a9 +size 1152297 diff --git a/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png b/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png index eb712f593d7a83f933d407a8e076a8395043f818..4c5f9698a6719b7f6117e6ba9aca7bcc271fe3f4 100644 --- a/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png +++ b/images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:890660e8846e2598860c33873a03377a5d66a82278a2c68115b0f3f533e5baf2 -size 423966 +oid sha256:96b89ef5420c50ea58598e31f39305963f8440a7b5301210594b4fa2e4486303 +size 471035 diff --git a/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png b/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png index f7fb3c4209a221b323f21bf2d3a04a4099303b66..a403066c75d0492f92f70b465457c39552f7ab69 100644 --- a/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png +++ b/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9812b7edebdd6cc777489b098ec596194dd11d7786bc9edb329f6d541dfdd0d5 -size 658572 +oid sha256:d54720b563437a9928015b0ddb19c8d5b259a6c6fe8d2965395c4620844eb2fb +size 645250 diff --git 
a/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png b/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png index 38b186975c2760f7add99cc23e5eec1a0c3a40d9..024ada6be75bbb7df95757a98f8f7ed960367a43 100644 --- a/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png +++ b/images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1e312159a350801ea70fa3afa9e9bf713cb2aa326cef6f014ffa0d2b65b73a7 -size 505367 +oid sha256:b4f519065758c864ec280ff6b60bdc09f11d37af8923116cc53ce8370c129d60 +size 505008 diff --git a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png index 5a1eef0c41aae048edc60cf276dcd0e423d54cec..560464b73c79065b4cf3a1e7764c8d27800227f7 100644 --- a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png +++ b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0152680dcdb4d5ab42888f1aca4de2dddca3960709ceafe0e72c1acab9872269 -size 1912078 +oid sha256:200fe21186afc63b6a4884f222b4a2a17f806dbc8c4729a3d6702b14bb8e4468 +size 1919687 diff --git a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png index dc22f52908da57e54c6346ac4972a37a11024f1b..d09d66a5131a2e7f94de328f6b621b27ccb47224 100644 --- a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png +++ b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:480ec8c078c04349ab01ed17ba23389e17f588df1272adb14855f3432c0b2d4f -size 606268 +oid sha256:cf7e42ef42c41da5be1097486d6d220f82dee4aac0f13ebcdaa0b752605cbdc6 +size 808189 diff --git a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png index c5cb817df692ce58172b1a9c834a065abdae7fa3..6592a509c9421985f02d9a7bf2e4843ee3f069a5 100644 --- a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png +++ b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66b1a56a7916618c477a482a43178bd4688540cbaada60c4b8116201b5603b51 -size 638962 +oid sha256:5ea77eb2708bfde29c1999306bbdbe9bad4e6a75f6d85b4d5ff55f9701cc7cdd +size 595343 diff --git a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png index 518a86410a9ea9158331759dc347b2935a6d3789..ab1e39751463a3490b60cec98fb1943aec2d62e5 100644 --- a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png +++ b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97c81ddd63159cc78d54a2b359f933cef54248d0cb9427487fcd0f9948280911 -size 696256 +oid sha256:af71811ce9b68b82974fb0ad91c50e5bc76a8e63df586b4987cb48547051c35c +size 743117 diff --git 
a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png index 1003eb7c1ca2546b4cbdc40c5700b2b8058c6893..5daf4c712c582f09a1c73cf56c9ed3369ebfd398 100644 --- a/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png +++ b/images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeee5cc51d4b2ea1251b1290861f1d81eeefc6bd99a67fd3296795e6719dee6d -size 38659 +oid sha256:1c32453290947b34c6ecaa2f088b9aab289a22fd3441ca26aeb4df6b97408ba0 +size 41223 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png index 270067b9208f11590228976d1db2947ccf9d152a..3be9ae8b4b527cb69c3cb93b240098509fdb6ec3 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe6546d6958ca977b025f96b6eacbcab40fe7b98a459599b282605d8fc4ca334 -size 668699 +oid sha256:84842b0610ce8950afdaf2dc5e831840dae32b50c02070fdfc3a8bf700872ac5 +size 874727 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png index 42d9df8a33fba5918290e6a87315ac5d1eb966b0..6c925840e7f7de53997a27e3f18f93ffaea6f350 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c251cede82eeb23bffb33e05250007c741d35fa376a14d3e672ad3d5944f3adb -size 668002 +oid sha256:b8724295f17bf13edc5f42d520cca801db4cb8ccfd297b4ea5f5093a5ccde77d +size 640996 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png index ba9403b3e7b51ce365e5a415238f6008f91be120..cfb07df5fbe0dba1635f6bab7dbb3d747563b9ea 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1071f18cee71679ff01e1ad064941c2096cd35cf746b95f561ab507edcab492 -size 465551 +oid sha256:4a88bae051836c5add87831ef65b551028ab7b9280f70ef5e84efbb0c5d50087 +size 375723 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png index df0d7f344f560ef987e47abb48c30ae74c3d3e29..5135260ba9904d8604deb0efb96b665a5a4f462c 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0c25df097f8a8afd94c6806aec738aadb1f095943254909418f3075e28bb391 -size 1874455 +oid sha256:db20bb80fbc2a884e1ccb8760a199eb131389ff15522641a9b2821d714e4c2cd +size 1050469 diff --git 
a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png index 23ba446d6dc3e1220eace545d0bcd821f7a7e386..6dfa0f0a82d63068bd0158046855b0da99590817 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15deadb49b5fa54b22d7f46b38d95fb9acd599733404f74d1944ad4aca7625df -size 983904 +oid sha256:48c8259a806e627a11000413b6c90f7a55e734103214d9b37922791b39fea42c +size 1599562 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png index ce1e05dc84edeaad4d90d31dde78ddd15fef0845..24876781f241b5206a42f9066667d58451c04c3e 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15445275af49f27f81c76d9fa92a0a90c306de6ba638c9a54ed5e55eba1b7caf -size 649933 +oid sha256:93c0353f1b84ee2720bd1061f10fd09bdf7bfbd502b6ece16ff241698248f8f7 +size 947033 diff --git a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png index e9a07c481bd44e9443568c41ab6f2e6e763813e2..fe010ae278e797908ec927aa558a6fcf9028c600 100644 --- a/images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png +++ b/images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91ec27bd1e54f7b32c38d37b69b331ce8f5d99729d64772b3a95a7cc80769d47 -size 468890 +oid sha256:552464d208e6bf6d22f91191b589edd34caac045be4e3cf0005092213b382081 +size 417918 diff --git a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png index 42b2e3c50ef5fb8dba66d463d39034baa4715da6..f09f39538287bbea863a5d35cc524e0194b2f437 100644 --- a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png +++ b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3418962a93d8dc2d45e1f0e42e8b9ddfdf1f39811e6c355e87b3d1a6a09f214 -size 1782444 +oid sha256:dfc1f975a91d40387e95ce6a3301e54572078632aba31fe47271a7da30113985 +size 1782495 diff --git a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png index 9354e9d9015540f81132b9ed48a716fd6f285338..4dc5a0d549b5f013eff8ecef05e84e657424b5c0 100644 --- a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png +++ b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7c30ab79aae9d7fae41e6d38e4d24d50823f5c60f3af5e6f5944c5ff517d6eb -size 874161 +oid sha256:68b30c49bd10f7211c1ed4337e49241df82eb0c7c8684311945fbc5b22448342 +size 636289 diff --git 
a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png index 3400c6a96b5bea2b170e750d76d87f7265f59a98..f372cb52dd25bd1a2a021afee7e07f03442d182b 100644 --- a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png +++ b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d9412e9e8d5afbe0acfa42d4075c7f9430035a6c5dff6d6750f5c9e687846fa -size 1146677 +oid sha256:4010385a6a9a6cace4e3f5a2cc8af860e49b6bd11e6b8d162c82c34c774ad2a7 +size 1612070 diff --git a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png index abf4cb6757134f78fc25a9b7145bdbf38d96e9ad..c21575019df185e48ab2efe204b43f52b623490a 100644 --- a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png +++ b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a7eabd2e1899e254c9a76c4f3a2d8e48ddd3a10bb3626aa600b235266b252c5 -size 874793 +oid sha256:d0ad0363b110a3e612cb70026332d34cfb0c42f1362913cdc2ebb9b969bf567f +size 644558 diff --git a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png index 319748f5bf872ee771c5807942e98373ab35e8d1..ceabcd77c83aa791f58354d47cf269b0e167fb53 100644 --- a/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png +++ b/images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99b1204f26d4b309979c7a35729e3863c3be118aca7e650daf693cab0e0e7b80 -size 1128733 +oid sha256:6882d740f08504b6fe3bf6ba99961270f1bcf62cb0b73db78de098e2977f389b +size 1225921 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png index a27a21bb00ec836ed7e28c1d7daa3ba8287adf55..dd5cf04787a448052d6b3d061ea2a7bcc55c7907 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:929e42764952c61554d2c607d85b4d4cde3ad9a4efb0b7d60cfa35344d6f8a11 -size 1489667 +oid sha256:96dfb0226cbb1dcf1fa0d1efe757b08682fd3de2202793e03b834e375022e00f +size 1523815 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png index 2dedbb8c953d605e164462c32b18a63acd62a33d..7690084be8ad06d0c2c0e702426d11153d91d23e 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14f9db8562a5fb15d5b0fdee3757de9a2235a7f04b32c8159003174840e90598 -size 1793679 +oid sha256:701dbe62294d770d87d7e3c748b2025b5a61eda90430130cd00115817d23567c +size 1728391 diff --git 
a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png index 9132bd1236a8dcd6d52dd09e075b64f47c0b505d..e1d6e7ea975ebc1a707c4dafd6f1702e2c5a353b 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db674b3cb6c02bd668fddffccb805965352fc4a7f6657634e72ca6f394ad41fc -size 1044666 +oid sha256:1273f590c9bde8cdb002e6e5f1aac2b29ec2e99a4343ffa4c10d953aaf2b6b3d +size 610476 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png index 7e80751eaaf363a31cc6071be6117362ad2b1e24..33bb1f5a1e43741baa8c263f8428fee307e7de26 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d138c75777dd7e46d3745569a44e3c7b9618db24e1b89ac6b44b57b8ce3e87b2 -size 1400989 +oid sha256:07ccac4b103f1ec077f8f00c2dc710f0e2bca2105369c6718c5fca5bbf8875b0 +size 951641 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png index d6c027d38d9de9e812fbfccf3137a9a3da30c31a..a151bb0b3d53daaa4b3a2cc367d5fcfd52f36a02 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:daffccfbaf842a98c469048451ebb8f105ed106318df2c5e28595f1b08ecaa67 -size 1590193 +oid sha256:df1db81fb314b6af1e7eb475bddcf7da8ae1c6d2754714a7d0c6147d92c8c564 +size 1581002 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png index 5e8bae5270bb77c1bfee5674e77cb7e58b8072cf..f2bede24741ffa14b8e9227d455c1525be93325f 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c46e0178281bfb8a58fcbd1a94d1150c0df50b079723589efc1742bede512bd -size 706780 +oid sha256:8e52c88a7fbfce505affe935b3f7bafdd893e688b922f85e66d85a93c4b6e9fb +size 925927 diff --git a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png index 53ecf70435ce364faf3876b22fb2d550104b9ebf..eb4e399e85aa7fe3bf44e4405c901b7718562b0b 100644 --- a/images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png +++ b/images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e72751a1f29edff38a416dbd366268e5394a206c7561f0c6a92daf33fc1342ee -size 628007 +oid sha256:c40dd282053dbb6d669fbfc54201880d4e1632a6982c52455bbae85ac63c386c +size 1119443 diff --git 
a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png index cc55119e91555625457e584831205abde50d14c8..6b9baa2322a70e5810d862cacd03375dc825eb6c 100644 --- a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png +++ b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:022ba1eb3c8b3d09b0af0cd44318ed2e6e49e372c21ead0d16d6a5fccd37c2aa -size 1534049 +oid sha256:fbfe55b57d3380efc51885d1a8bb91f6a857c55895945f0fb0d0e22e2d2f0338 +size 1420234 diff --git a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png index 67b6ac7270e91b9f1aa7141cfacbdc3969b789b5..b58080c7aca93141c1a8c6f72eff0fd308e3c7de 100644 --- a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png +++ b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a7364f8f4db04fb18a3fc580491e6af20749510f5cb79bbdc5c94ea1615a00a -size 1533314 +oid sha256:c20f5a73b5129db80c16abed7d9c6ceef7b2d33649eeb97497b09a5de261b874 +size 1900258 diff --git a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png index afd3b1a88521f4fb97ebfcab899024e5dc80e6a7..a86b72841f266849a85ff1f8099c69de73930b1c 100644 --- a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png +++ b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53fd2297dbc6f5363baa6968a9fc1e700e1bc25705c2cece80dbd100a3c74635 -size 1529713 +oid sha256:27b8878a5b5f85665a20cdc83881880d56d53f7a25c690edeec68d1d9038b3f1 +size 1786813 diff --git a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png index bbbd6c950885c6ef16db261d31ed2cc9e48cf31f..ef6f662689aeb4e9fe0b2694f4075cdd856726db 100644 --- a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png +++ b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0db10a43d2db00a9816f349c71954051fbedfeacabc2ac0b65f9a91bad303540 -size 1429328 +oid sha256:a90afbdade180652f24f93341501870af386c53e9ad0737128246840cdf1ed30 +size 1206207 diff --git a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png index dd58e5dd49b6a029b673a980184a4ceaa7eaf307..da65726423eadb57e4a9af2073c712f9bebe2313 100644 --- a/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png +++ b/images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a83fa70162d9bb0504e4ecc17a7968a119030d076e81731207204e88b48a00e8 -size 1025137 +oid sha256:a4bda4cb808a2d5cb1cb014cd2f9d33c48695e44f1615a797bfaf7d9cdbde03f +size 1243375 diff --git 
a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png index de8e8834008dcbcb1de1b3187a5f55b7cec3ae40..2fd4d8e3eee843e9795b3a5ae96361bc98fc0241 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2eb751357bed081848e40cf6ba39563c749b82a08f467a677017374ac7281399 -size 1423657 +oid sha256:d097a41e0ea1d97bebd8ee3625b3c3647062780ea057ba34e28372e694320301 +size 1979399 diff --git a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png index ff69c67cf4a423e20bfc397b86bb5f47aad7cbf4..65f62beb9d08cfc6d58b926a171d267d368335a6 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:577ee244dd0a34c6c057421adfc446dd79f11f5fd172c6da7b9262cc9cff0470 -size 1482867 +oid sha256:4d85bf7f954e770075d57fd6dad6f1b51f3b8c7c122f75ae463dc53a220406e8 +size 2240184 diff --git a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png index afb1a9b69142ca9425b978597ee8c0b39efd984e..fdade728d262e99a53121b7cee844f93c161d104 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5092f3af09378249b2aa29b75c66a62542a143586a5d18fea50b9e18ad9da1e9 -size 1787139 +oid sha256:afe8694420d607c2d82400f55bf7240f6244e4a86cb85a14ab3370559403b48a +size 1208574 diff --git a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png index 683d5dd0344ba6bfbcfeb9bea4794155e734b419..e9cb92d145b7e2353883259366eee807ade6561c 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4df7a2d5366db4971f0aa407890a6a8b93c4048322dd30bbdc0fdc3d75234caa -size 1892350 +oid sha256:81d91bf9d0b38cf1d4b58985c9cd18291cbd9a422b38c9a0c15f75169614095c +size 2255967 diff --git a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png index 993e12b8ff3e294f38f9b9fe9065b1ffb75be601..50bb933ff1fef546208d789dff5063f194aef29d 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12ef9440238a5ce1901d34b884ad92186ecb378e733ecbe9cfd03d94350438ab -size 2484199 +oid sha256:ee42e3db32a7225996bcc0714769ec5059623d5885305a3d0103c71c47c0310c +size 2492973 diff --git 
a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png index c0fb2875c325234f0d84c5bdd0a420dd0b8b23ec..10e214a95504db94ad14f664b223f05fc12dd501 100644 --- a/images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png +++ b/images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:134021057ac28f2421f05fa3b927b70bb4fc9856f199761e4f303826e7e367a7 -size 1420635 +oid sha256:3c0563da31f667c9aa7f86ec6bbb1fc8c8b7733a6f1bc6a2f749b870dfa74be2 +size 1019323 diff --git a/images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png b/images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png index 3e00ba9cefc493097caa8650a2bc5b457f38af13..1648df22a30ef4546ec8a773824e3f644a8fc1c5 100644 --- a/images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png +++ b/images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:995efc699f8457dd892947a17029b94a706cd75ec0795777c59b7dd6978e7132 -size 158783 +oid sha256:e1280da48d6ac70ff084f9e31fb900bbbfd837276c027f6c89b00ae742791aea +size 175122 diff --git a/images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png b/images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png index 96d483012b280721e0c92190b641a4eab45b3966..d6579aee137909881acd8447f73cc3367b6f9d99 100644 --- a/images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png +++ b/images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2ebdcce215bc6c920505410be785e608cdac215a595a3880064e4b71e9bbd07 -size 89622 +oid sha256:382450d2667521ffb7455990150d2e21c65e4210b3025d46f682b528ee3999e4 +size 90590 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png index 6b521dae4f93f5df08d67e21ea22d4327ef92f43..0136b0b3bdf7ef2c49d0e7ccb7fb23abf63b8cb4 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4559f911216013f5f180f4c6a2bd3c319f6280eb33e9daea31c9b6564b5d9b8e -size 499544 +oid sha256:3e114f0e08a0d8869f30dd37171b47e11e3749bfd74a4a047135d4e5302ca07e +size 353298 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png index 2200bcc6f1d27912333c1446613ded9e10b7a0d2..08d586056214a7edd56117962bbe226a898d3db9 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4858016477f6eb5b70b3a66861cd970643b95a565afbf0dce9f96ead64a34e4a -size 761725 +oid sha256:28e4595e93c5889385cce948af036bd3b744fd3ea0bc473d0f9a0feba2b3cd74 +size 500296 diff --git 
a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png index f9bf3bcfbd5840754f902996ecf6d4d75f387246..e2922070289d1926221eec364b429bbdf623ba68 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d19885b895fa39cbee9cd6d995273b326d5ce57abaf1b689b58bd9205d21d715 -size 499332 +oid sha256:272ea2373b287ff266b72b43140134f6dfe2775a83569b48ca9b28df3590e01a +size 705001 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png index f6381e69c8b548a2a1e1e861d896d04cbadd6820..beb8ab66b49957b4b07ad75b85d45959ab892ced 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bd80b7459917ecbe210bc78ed88dd88e84a6fa3c76c56ddc707b291e29fbd5e -size 2317302 +oid sha256:3f21a34387839cbf0f771f653aa792e836b1d878c28f57b60bfdb5876aa91a1f +size 726432 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png index 0e057dbb5afc4f529ebb575787809fe1a45cf390..bcc003256e81c7e4a841262872110fa8d0a362f7 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f25e4bd5d6f681803fddda453cf4095d16d6fe293215e3f5c08600ea00dcc25 -size 561171 +oid sha256:f6dd2db213481f78be94b69371d7b5fd71508a80a49048b78ad62c020a32e3ca +size 683824 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png index e7a0e12562599d5a02c4f02d26af7d578453bfa2..f4096476b7cf5c9547b49fced3f46a2561494a89 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1c11d67d74cff592aa3dee41cb8a35afb9bbdd0d79f9076a8624671f85fe3e4 -size 753826 +oid sha256:50d56cd003d931d86b6453825aca92b3ea6c797e06d35fc87ef8aacb585d8c97 +size 643903 diff --git a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png index b0bda0138b585a5fb09a5f49b27c57757b4160fb..37a2c8bc5be8b0fc90ef675d4baaeb66964a0b65 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1033382e7b4f4859454ef26173bb93655935a43a47b47ca567ec0712277a6542 -size 558671 +oid sha256:23cbcc30065a419cef6e60f7c646fa13cd2268cc95d88263d5c760668ab98b1c +size 452491 diff --git 
a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png index cf2964bba4a745279214a24681e1c9da940fa1bc..67cbf1c05f4625544f16c619aad5bc0710a104a4 100644 --- a/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png +++ b/images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8be1a9d4c9eec772131429450d7b5fc39debc1eee250379c3adc5937fd54820 -size 1293761 +oid sha256:69c874cfee891d48087e25c5bdb5b98c8973fe06412faa0db60221804b0e983c +size 1736341 diff --git a/images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png b/images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png index ba0b4d509657d40005e04b907295bfed2a6e1e09..4266a2bc09b60d5f1cd1050a1c43268656a3285b 100644 --- a/images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png +++ b/images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5aeec8fa544c40766b9c610b9337a7f5804b7ef886ce8f1ab221dc52dac3cbb4 -size 1756828 +oid sha256:9b5b00899683681f28ca0fa9abfd68528d1102713a4b6be7e6d77d3c9249cc9f +size 1026006 diff --git a/images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png b/images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png index fa6df812d2316ab85adfd151c1c01f1df47da418..e906c9382609a887c70a17c28328bb205234d09c 100644 --- a/images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png +++ b/images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93f32e423ab36877fb4efd57277af0b4e5016b15733540fa45f1ce9b1832c8d1 -size 1701069 +oid sha256:65908c338d559d69c74ae45e6ed90481c8bad18dd3f128f57fc93dde6fc1e452 +size 2447236 diff --git a/images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png b/images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png index 171ee094b27c55231ee9938c2a7bcb4e821b3cbc..ac8fb438907f6fca5197744d39201db4461676b5 100644 --- a/images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png +++ b/images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a4e0a7f99ca7e8ea3dd699e7e1f95a1224e9e2eb8a58499feb36ed1dece7c26 -size 1179571 +oid sha256:68b9d620662bcffb7888ec872bebe1e9b169e1be6ccae50358b25a4a494d9521 +size 1150122 diff --git a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png index 7d653a7436f39ad00a6a294ddb253f1d58b51695..47b6fd44100ea46db9c8d1d080178b7d6bd3ba1a 100644 --- a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png +++ b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2b03483e472a89a2e1a2a5999dc8dbcb0cb85eca6414b9a1295e87f4f6c65d6 -size 1883160 +oid sha256:8788e5fa56e97fd284b6fe73a9f6fc087d3de5400a15e88bea78ef6c6ef023b8 +size 1343025 diff --git 
a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png index d1e4de46c881abceb195010c1afcf2e821711e15..528a51aaceae51922837f982e7ef19b7cd42a346 100644 --- a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png +++ b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a998ee9ce7f067794540ad35fdb52803c6049645774a9e3324aa37f61587880e -size 3322748 +oid sha256:1cf362a44ab55c1e5f8320c33390b76b6852502aa786e6cca729ef7d5f9ef48c +size 2249776 diff --git a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png index 7ae32e297e34f3d679f8064ccb39266480962adb..8ed177817e5af4ccf7a5db0eadedae5851106d30 100644 --- a/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png +++ b/images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbc78a54802900f699cc2997318644f6aafa6586e1504643144b5b1899ac9124 -size 2130217 +oid sha256:b72adbf305531d646555c06251f48e5e972416bab7ca685faf314e81a1e17ca5 +size 2397938 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png index d8d043d44cebed6d46d45f8499654cc54a9f91a5..e13edc646abbb5b3d83da7858e2e8efce522dd53 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd8f3b1076dfbf33dfddb8157650a773abb585a165a36657c9e397f2a3bb147e -size 1023274 +oid sha256:8d644cf9fe8fe7e982016e8b1f94094d7caad834880072a515d180e3da826012 +size 1424801 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png index 2e36c749a871a504302955ee6bde8416566fffa9..c66bb974e26ff585893a332f9734d4bc079ca282 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e745b982bc4afd7c822db9f470c4f2ae29e2ea2ef6aa250fa1ee59fd96009f3 -size 1048698 +oid sha256:3091434de53601f6febbf6e9fe185fa4f149361e189b508905a9822dbfae9d78 +size 1495351 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png index 0a358272a882ccdac65a9c3901da56f2d17e12aa..ab76b907a9238b3470e1de326c9fab596cb1f97d 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de1d4f32d19dd6bfbb567b2182972e76904ac229871a68931b882944de476734 -size 991459 +oid sha256:c3d050209747fe151320af1a4fd7bb38357ce340f10d1520248a4490f3c5fd45 +size 1304366 diff --git 
a/images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png index 0c9862458998b3c79dae700e2b733d4d1c209e4e..9325ed8f7fb7b2193fb34e2f5472b0eeebff82ed 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8be50320c9f89591c4b7b0e813a71ed485e8e4e61a539c71dcd05ef1b7e2a9b9 -size 1007676 +oid sha256:53cb869c58c3d9caaee85b584c9111dfc6b96b6fdb5853e690dbe2b379bf8adc +size 974951 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png index e39fe97d1cb225cf1e4de7b2c4f77d3feb8a6942..3c41c8a232af6600da40f53cf539e2cae7b55a9f 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5e2e20d5ca4688e179d462f44945d8f93a29f89b6b3aee2c8ce943bdf986024 -size 1309087 +oid sha256:d52e9744dba7be5aaa94647361fa9ab2f74154fe4fcbcc8d7a7010158647bae5 +size 1392456 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png index c74b3e0a6b1e519dc88705c345e967b0632fd111..bee62252c30c1475a9888b8c6dc52f34c5cdd19f 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea2cb6d1a9576271bd7aef7982d08e27a796c22ce78d8210ae116d17cf61ffa -size 895449 +oid sha256:44f8276b8d3466ab3fd457a2d72d1c706e3048821c9196f76c40732a066908fe +size 1400414 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png index 3e564c38fc85831a99e5d771799541b39b54d3bb..009df74599941aaa3c84d68a974f51ffa2284cfe 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:780603c6931b36cb0741489ebc901bc3931816d03df163ecb6d203cfc8185667 -size 985270 +oid sha256:8b0c645e39d635a8c0886b0841331839cba6156a3852756ac5b926651c8ae0e6 +size 596402 diff --git a/images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png b/images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png index 63bbdbb0951edff87c26f14d751de5e7e282cf60..c3a65c3390e090b77d42374ab888f2c03c968f3d 100644 --- a/images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png +++ b/images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e859d0ea443a97e962ff91607267a185b5316a408b19c87a3f5ccdafac5fbbb -size 2324229 +oid sha256:8d2106995cf579735ff5ce1699537eb682824e6e3f62c2929105f64e09620714 +size 2000547 diff --git 
a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png index 91fa81895d284e107af48e75a48907496fe7b2d4..c798274c332fad399986128ff6097d86dd1bec33 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ff9c63b10a3f23a2f97e6f64c19f5472034c99d6b90399b68df35c49bcf6d7c -size 1149694 +oid sha256:c012c3c7efb5b8f24226dd27ac5a79f1696797ed2634a4f780a2f42550970179 +size 853422 diff --git a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png index f45621a66480a1c75f35824e862dc850183fed18..1a64ca6d4a39f664af0b32db1b8e4a3548a2f435 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6a13366c9ff18441563ac2869814e946a7b8c39907b472bf3d92c46b5a3f506 -size 239344 +oid sha256:34d3d9f69c6feeb39a231b2eaa14ed1760a0d3be8a79ba20164745187b5e7d4c +size 416329 diff --git a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png index 20776a09926a9966b9023aa28369c7175ad99f5b..a14fbd8dee4ef7a7cc1df1879c14b9aaa8ce60c0 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4b232c0b009ab82ad75e8ab4a15a145e38a24280bbda777de64a019a63ee789 -size 213169 +oid sha256:fa6269929c5ce94883a83dabd8d1dc87fa056b11861a899f2ff56d0ed5b008bb +size 414266 diff --git a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png index 05811b8def9f6278e26655350cf7660876e8ec64..a9273a462427cd5e432c43b2795136f815457ca4 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:698bea30c763ac0c362703eaf00d4e08948a9c381c440df224a93f5ebff3ed1d -size 179704 +oid sha256:b1b30d285768c02ddc8cdc1bc77e4999509260031af44a7412c9a7be5e93dd82 +size 302393 diff --git a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png index a6bb592843b84715c208479d5aafbe807d4ceb60..31085c4f3da12bf59f985059638375aa33f62fd5 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9cab1dfbe1eeffca8f3667e154c5b485587d85975c119f46dec6900667559c3 -size 2045539 +oid sha256:77f42ac8836d250081b73db7aa058d6ea464dc4fb7a14c18b0bafd2eb5a943e4 +size 1906265 diff --git 
a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png index 01189a6a26e877cb3ebb824f8e24fc84c668b369..b9a68db7b834af73445c050d9f92771040861664 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00b6b6976325605a2f63b12f19e3546e3da8809df645e053f7264ee08cbc4b22 -size 1024595 +oid sha256:c337416e82521b3b58f0216118df948e9ecdccaea6ad6c7e63d80b113c416480 +size 1463772 diff --git a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png index af09f0da58e50bd71e32642acbf42576b4a7e21e..d0b2c232d371a3a57e5c71e6f9826eeaee5600c3 100644 --- a/images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png +++ b/images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bb00e0e76c186cec557925dac911bb3494999fed8adfa162ab8427f39df1c0c -size 326235 +oid sha256:77229163ca175b6d6a886e4f83804b20039cccbd85fc18c794bd91862f87f151 +size 246443 diff --git a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png index b45b0250c113675de2e857b97fabb2ec554564e5..ad50401c371926f4ba2cb278fe532acafc48b128 100644 --- a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png +++ b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6206e62d04ca1ff0626ac552edfacb370e46b45f0331f76f0923d2c7aec5775 -size 1148043 +oid sha256:9d61e75efe76c926063158b8e91d4bfa79ffe010f5e264dccb2629817cd77cce +size 1485979 diff --git a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png index e237ddf84f5ff82d94eae72b5a408d0042aaf3a8..8a3ff8927b2dc70d96db353ccfb233c0c67b783d 100644 --- a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png +++ b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59dd2470e07ed2157dd26239ede5507ed72204149583082b3a2f7722d83b0278 -size 537495 +oid sha256:96a2a8c060eb8839bd35335a4a9de87d81bcf27c16d3fd76358105ce8713e39a +size 527008 diff --git a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png index 507b28024cb26c84aa31e3db1cbc4e6ba32bc064..159f57209c330676debdb003d4834b845b39779e 100644 --- a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png +++ b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b918da2a7a9670b38666425cbde88908e05412497f56fcf4803b49aa850bd3ab -size 806389 +oid sha256:33b78992b851bb5175c082bc21acc096e3b3ef9d6fac699d765aadf64c082651 +size 523468 diff --git 
a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png index 1d00ac19706ffc00376f72af15a3df5ccf87af80..6331c577eff295e180698caf25864e8025735f18 100644 --- a/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png +++ b/images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57f67c63c10664bbb070d0212982ed01558f69edfe2f9b37cffc4ca127c7b243 -size 629871 +oid sha256:f1a5f14f9cefd5fce7a97d00914d3af7073d88a9a82358f5502e7cf33d304a45 +size 629673 diff --git a/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png b/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png index 10e8c9248e07b6984912a2b61bfece1c364530a3..9cc2a7c0b7b3ee9a4d77c9e4612f744275eece88 100644 --- a/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png +++ b/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4d6123a9f5307dcef3820cb387fda72b7856edbfe41751e4ec1df39d942ad9d -size 1187394 +oid sha256:0fb5b5b35a95456aed26847cb58006fb926237c02f5bdd786edcb7ae64ad3314 +size 1054339 diff --git a/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png b/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png index f026344177c428b65bf84894ccedf7f8426cd811..f2066ac5ce74593c3bfb932beeb33dfc1769835e 100644 --- a/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png +++ b/images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd587224aec9d0bcd12df24db513e16d654ba73f977650eec6f5207ce5f120be -size 916598 +oid sha256:82ae9bfe7def9960f5abacab214790b015dcca2d262eec7f737e28c40e48dd8e +size 631214 diff --git a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png index cc68f3ec27f1210816b136e52dfe43ad2510e822..ab44fa26f63a7a2e7eb29391f6f88125e7aa9144 100644 --- a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png +++ b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e4ab4862dcbc2393d09ebfe5adce11f8f96b4649bedba4dfec65a7fd9c53b49 -size 1025605 +oid sha256:0f55aa4b5bf566ed28be0d50fda105d196c155d51278732a75587bb1578b6255 +size 242352 diff --git a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png index 40d1c97896b9635c1fdd19410b1cf81ea0d277b9..27527c64a2e636b4303dbabde77eee12e806d06a 100644 --- a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png +++ b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c26b7435c614a5ea83b3bc97ed71b9878b37b210b921ba8f716671acbc962346 -size 1078453 +oid sha256:80ab2555e2e6bae8b14ec63022a20e3a22cef46680d3f4c0c606b3772b1b9631 +size 1280167 diff --git 
a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png index b70062d09fcb4dbcf74db803d4d0a77ac138b56d..0268070f05821330b5bccff07975359707c29ac5 100644 --- a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png +++ b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24e897a409cc2caf36b11c35fdd56638db94c6b1cf5758a94f97589d5648f941 -size 692998 +oid sha256:b50dc9bc7ff9c7b5003fc384ad027fea64ad6c64b5555c23812e851bf1c74894 +size 257225 diff --git a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png index 0d6a06cb0e4ed71f973465c7c611d8d5f5aa9a55..3da4dbe26df8ea524fc5a5217aee6851b3ab334f 100644 --- a/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png +++ b/images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:694a52d7812255bcb8fdacc7ea4a4be20c5753e166fa6b07bf80c175d98ddaec -size 1075365 +oid sha256:e3c5c9d87af768455863e4dd1d84d8e609dc39281846d3fcd1e792cc3ec78bd4 +size 1078780 diff --git a/images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png index 90057f325065def91bb926c2e34163135cff293f..7d0130cf2a8f11453c35e5479deb0fed15d2c19f 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca3c4c4ff70fb461df51d690743d9140857acf6733c6e991fea8c879584f2653 -size 1579869 +oid sha256:3efca7485c73e72a1e9b8619d45c57281a9edac08cee770026d876f74f2b81e1 +size 1501925 diff --git a/images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png index 037032e6068b76b26a32cfb4c87084a39ba07ba4..9d654b41bea66257d5861912d44ce65d4bd16dba 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc01c60db814194fc17a8d38c02acbce59a4136be3812db9dc4390d666fffc20 -size 401541 +oid sha256:f86297501a9c8eefb13741cee0fa53666fdf29dae22e4e56051ae419076538fa +size 203797 diff --git a/images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png index 862c03e8d91274e8c09385252c35ebfb6bdf1afb..54e5ced22fdfc07fec17f23932cc87e96b64f1b3 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a88934ee375953e55a3a592d301a1085087130fa58027524d253b04fb330a98 -size 402609 +oid sha256:d07c3aff81718c247f53ff2cac9217476f10aad7d8452a56aeeba894ed6bf91d +size 221429 diff --git 
a/images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png index 68e32e60cdb715ab7ec0913220f6106742c0ef1e..52813034ca51ce25ece0eb2473d46d58364aa504 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:732a37471433eac6ec919ba1be924908f1650c1439805e2c45bdaa618d0de9e4 -size 402210 +oid sha256:a594aab9650e9edfe447ac6b0d29f98506c36f2794df8f563d139403153a1608 +size 211802 diff --git a/images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png index 8e9d94ee4e0ceb907cf50bd990c67522c7f58b2f..1202909c26bab8ce83b4bf023ac3c237972406c5 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3c0617042e5bbd88cc9e9c30ca0ffba0ab756a5f11c2650a1543a45fe1c81d8 -size 737400 +oid sha256:b8f491f8ca227411491638c050cd45814c0a468c82b97379a309c42e50b079ad +size 538764 diff --git a/images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png b/images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png index 11fe85a3ea8971cb906db9da8c70eb987b5d113f..10ded21afc774c03873e6cb2363933d5f6500f85 100644 --- a/images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png +++ b/images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73daae0951dd59c5db13e8029a42d17d8dbf9673e71fea33e4a73027856c5f49 -size 435853 +oid sha256:3463dc198e76e3a41b87cd75f765d72b40c6f74e7bc3a4af87d84ed1458d65ae +size 461026 diff --git a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png index 8481c6a00e0286f1e31241900bdb70238759a840..d4ff223376b6d1f247ffb9869d85ca91d7c6cffe 100644 --- a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png +++ b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dfcdbd15c0e8636d83015b4f8977589cbe3c4478aeb710f4611c33f07edd6e2 -size 549809 +oid sha256:cdf64487f8bc1c898e777825104a6616836679d8bc3519f836e8624179fc8643 +size 473272 diff --git a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png index 9f8227d65db426c606b3028308d9297917e85e66..52ce982be5a71a24e6ae9e9cfdffe6f1f6e9cb8c 100644 --- a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png +++ b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5256ff14bdeb4442f7f13eef94e689c0df7db75e7945b71c3b941baec1537f24 -size 424579 +oid sha256:e1afbaca07cf0d3e11ac631619caf50fcdf2e370d6db5be59b5a6862f99ac043 +size 405584 diff --git 
a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png index ee7c75db2575f03f113548acae9353969c8feb04..3c89ed54fed72246b4304ecd8f2301b4feb55ef2 100644 --- a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png +++ b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9ea9cd2712cb52fa6f4a7dc00145b58b428eb41a8191c6449bb4b3756f2af22 -size 367055 +oid sha256:280f5fae1d69ef0ccd1fa776c39f6f35b08c3114fb3ce75d0006b704072116cb +size 462858 diff --git a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png index 211250d9c345f82c9f11b1434058a81414d965c6..8b4ffff52939d1abf60b9a8af8afa62c11b1fcc7 100644 --- a/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png +++ b/images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84e9e03564906ad7599ba1f3116da6d6ca959900ac61c81ed9a16d2d00f2887a -size 411231 +oid sha256:31f630c1d160e566c88d8537fd95b4b3469105dd24398a1dda5f11553fc3aa79 +size 510561 diff --git a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png index 25f65b0a44e15e01f766c16c5bf7999d6e21e33f..ee298da74b51b00f406aa81108523225f9b69be3 100644 --- a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png +++ b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46faf8c96cb3b8d249ee434cb29bcf8683d5f9865b529246a309b9fbdcd8409c -size 393485 +oid sha256:51b6f22ce7c97394e46773a002e2bdff49ef848603598a1332efd22a7cf3b76e +size 232394 diff --git a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png index b7f32d0200bfe6fb6349ccd7b33ee93c2c5bc61c..7f35f2155cd56a9bd76e327a7d167dbe6ab9c222 100644 --- a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png +++ b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d9a38d4e527ba0b29d39bbd9cc7fb49ac2cd08ceb4f65a0a0b2ea02f9fd641e -size 579491 +oid sha256:e3cef70ed0eaf52ce1710ec1be9335a4dac47565cf6d5f1be1e92cf0c66f2883 +size 374895 diff --git a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png index 14dcf183ea0382cdd34af923c17820bffb4529e1..d8da4796e4b8148450f867a7809a28f9da6ba96d 100644 --- a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png +++ b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d40c7b36a47addf08e621744410f4d849460d7545425bfb43acd57647d98971 -size 1378896 +oid sha256:5ef827b46a9417ed5ddfa62d4fbdc93bc7f09ab34d397a4b1226bdd95c8b529f +size 634974 diff --git 
a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png index 0e83e0d796a838c3e742588884bfc5c265c164d9..0304a9d95db46a545b6aa34e63c80ad8d040e540 100644 --- a/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png +++ b/images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7861bd6eb594d284ffc718f79e3e1c4fd79d1b5d386aacfaac23ef4ddad7912 -size 423465 +oid sha256:80ad5a6129cf929d01c32b540eb2166b4c597090e99b7c7475c6ded21bde1ef2 +size 484358 diff --git a/images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png index 24afb015b41cf1b12387f1971757aa841452b01b..8f55d5d5fdd6269c30cff766eff02c9c75c22d64 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8284d0567e4e8f5622fe284a699840e50744548ee4fe5dd44988e1262f46f2fd -size 1860881 +oid sha256:a6c33a8d8911884a0cdea5965986bb071b1c423a0bbbb20e9a4ff4ef3fadd31c +size 1643925 diff --git a/images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png index 05ea9f606895a80dc7e8840d4d517d2cc83d16c2..8ac3bc748737bb70baf21602d3f616fd8171f096 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64ad14e1a03b676f416652ba4398d225cb81d074e4fbab61652598788d63d298 -size 2573105 +oid sha256:7fbb27ed1f72e0bbf6538f1e053c885a50522eb150403398d6276c38a863c306 +size 1249350 diff --git a/images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png index b61ced85055b995702c033051e5b1191e2680c58..accd56c7571eed30dc342224ea952c808ffa34ba 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:867403a0dfdbf5a5abacd59b4a46d6fd707f208ba9c7e8b4e1117d19c91da512 -size 1147407 +oid sha256:ebf32294576bd5ea85fa9dc0ca4f57bea649bc7beb26d8bbefef47da206eb7fd +size 1253965 diff --git a/images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png index 830700298806176fdccf139391219112b26bddc9..62d7f0dc36c99a27c58803d9ab9af9dbe1fc0a0d 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e40de60278a9df3bf9bdbcd3b93cee982e9a07129d18605e1799b6ba54b5fa03 -size 1534670 +oid sha256:c450978e26cc28a8faa5db172b41f61b8d5cc3f6a4c8899bea79f31342908db6 +size 1265625 diff --git 
a/images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png index e6acfdeea696f8519c08eb357c76c7b1b80109b1..1e697acb39891f8e3c119b7a40407950aca5e7d5 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04a2acb330899bc886461b5636a8fe42d38eaed65d80ce353fe843bcd1e6f1e6 -size 1907285 +oid sha256:8bc8a20a2d63cf2397b157555a1ef6535275552e53ed19dc4ab77d3ab457f421 +size 1540121 diff --git a/images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png b/images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png index afe60bd6789111b17344b7121bf1cdd2653808ed..a0b8569199160d7ed94c72d8facd286a8dd79de7 100644 --- a/images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png +++ b/images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65796240a0bee288936097c8b99b238ce3a05cdda64202027b6343f71d596903 -size 1644894 +oid sha256:5bae82932e048a8c357fc81a2098f4a38e8dbe52466d0e7482a941ac05bd884f +size 1503477 diff --git a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png index edb6b51375437c74ddc41154c3bb733a8207cd6d..f5867369539623484e9e1598a6b42235e257eac1 100644 --- a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png +++ b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5433da047a9207a37da57c8cbb074f30397695022f19d8f4170666e4e5ac538b -size 1286310 +oid sha256:c228e36f2cef46dbd1d4c9c7b799cf3b86b1c55e76bd156b44a004ee5a9b96e9 +size 635375 diff --git a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png index ad4f33835cfd9c53bf918789f3fb70f495a08d7e..8b00a409a6504c1a2793a9416dedf09b7bdbbcfc 100644 --- a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png +++ b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ca6b0c417ffdb6b142d420d5b7716f24832d26fca06c68071fd011aa484ea24 -size 1268165 +oid sha256:f5acee044d492d38be46a299df9395604b6c66f6285630c0dd05a1ecd1ba4be4 +size 920043 diff --git a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png index dea1cb61f4650218e3d295289ee55e5cb0afee18..9eea7a8597274da0952677db79b7cbd2102354a9 100644 --- a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png +++ b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74f1e10e63e2c84770fe4d6a9d7fb3a5d4a0828ac1e0277263d0cac9613207d8 -size 1615464 +oid sha256:ade13832a377750c9fe050cb54c747f3a7dcf8bfaafd3b900bdf66b5eebb2385 +size 1490471 diff --git 
a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png index 081d3e68f5c007f226dde3667c781893c06ddfd9..d2eb38d202aa3ede9203bdb4aa45c360a101d154 100644 --- a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png +++ b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08e462fd64c0a2b23b1f1007fb9e63585cd0eb2ecab3eff8e214a7c67758f7f1 -size 885283 +oid sha256:f5c4e134f99db41c63c466af76f9fe86325742e569c5ad25171a8af8caeae9f0 +size 1594199 diff --git a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png index 151237242ff1faa79360aa798ca7830816afb0f9..06b1ad7cadef6f6c05645bc982b09dba1b0f369f 100644 --- a/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png +++ b/images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0571f9604a757de798b4cd9d35006fec8247369fe1ce913a38b332f8985cf27 -size 1686336 +oid sha256:27013a1177dee38570110c4740c6dd35c9997adefaebde5822f7cd2163c14850 +size 2090451 diff --git a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png index 6c7085ccfc95fd1eec05ea4a7353e207638d32b4..f2a1f0393df8a7ef944b29d932b495c444cf50f4 100644 --- a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png +++ b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a37e291a5dbba638887f079d49249d215ae7ede476d8391888f8a1a6396ba5d6 -size 1172402 +oid sha256:f5f23b55c4ed3e4d5ba874067ae694df236fcbb33f94a4c4bd6f708f7aa73f55 +size 2097999 diff --git a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png index a33fe0ef0d93390eebbd01fa007327877b1829ca..5f385246d15a7ed622c8d41ce337c3a22b73ab83 100644 --- a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png +++ b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae5f6960c86df83ea2580349ca9b317557709db94c55754af350d5077a169886 -size 1203580 +oid sha256:80de01aa9b5c7fed39dad28d139a576861074b7d108b14aa9f462229278d2714 +size 1355820 diff --git a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png index 718ef9a2d2c18d8fe4b4b587462b3331addab31a..c9c326c4dacf5076765748863ad483252a57837f 100644 --- a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png +++ b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0307399de71651e8a051f4fcbafffc4d882919c07b1f89fd612ce91e3eb8a345 -size 1219696 +oid sha256:ebd8346f5eb0da05f62945b223e3be75787824e6a861a28d55ee59cbe2ca8ef9 +size 194332 diff --git 
a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png index 7c72a0e37df03d608076fcf63fb792f376efff66..1deaba2dfd34c338e07d00aa302fd1a043955303 100644 --- a/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png +++ b/images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3915724c09aa310b034f71ba020e91d702419398d2f06fd46e92ef8b6ffbd26 -size 1285629 +oid sha256:69bc9c13c0002811807a656fbaa65c2877df4d4cdcb5a43675eb5e03ac87ea3f +size 157826 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png index b909949e0ef76241da12028a4d64f04dba4801d4..ad2d5d68049a55d21cda00f07584696b4d1b3c2e 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3248869f7f3d5ab8ad9f3d7b2cd0713ee4899b20abfec42f9a8e0238d9aee1aa -size 2059358 +oid sha256:29b72979a0956ff68bb40558f564de4cba40b397104bfae88f004ddff04ae2b5 +size 2380216 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png index f11256bbf85f7110b79dd1dc523d1cdf8d271e4a..b008d9ddb792da9e28a7281a3d48e49e22b7ae3a 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eca3efd67e8fb76b3702ffbb76ea89a19bed66128b30e86acc67d1545b69dc23 -size 453900 +oid sha256:3cbe971ccb8575acb4ce6d4b26a7d9ba0a3fefe29485919b39e26a434214a5e8 +size 531160 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png index 769fb80450aff65d67de1297e4d183dc4f25ba7f..8f590dfe038e88783a3da9cf417cfc12f066b5f8 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:feec4ea25575aea894f7c4c0eb175497e8cce476cd3e6531e9134eae4d93581d -size 922217 +oid sha256:e6280b2eab6e972638876adf6063da82325d086f85ef892864a9b0630414995d +size 955409 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png index 4cda8727dd42c8d0e81168d602696582199639b7..9c5494936f681697f402dac9fd02638f04357c4f 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97182d4f5011e9895e96a74f1321308663fee968fb8ece158a1c0807179474c9 -size 1119650 +oid sha256:88ccb0355917cde2a5250a406a560cbaecbdeb46b7c7c72f17a00f36a70b5d20 +size 944883 diff --git 
a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png index 31d2866e3f3916721c48fde9a5c0bbdbe3d65f72..e0a7377553d5ba8b54d9b39cc9abdfb0f084b76a 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2483e1521bcda78432c0a4209c570596b5c0af845d849a081f395c8883a7f75 -size 1502130 +oid sha256:7b6a8d4d0c3f48d5c52f77bd1b6531e66c9904de53f89c2d366bbeb150f70122 +size 1083334 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png index 889c3c492b2a1ddbdaf29908c856bfa14afdde75..a488cbd152dc720d959a002341f8ceba53181d9b 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2b31ce5c6ad0cd841bf479376c41f75a4cf66619afee9d743ffd05e63d731d3 -size 454533 +oid sha256:2aeff38148b77f7a203c0c9976743b61d8e4303fb55ef77643e5ae19888664d6 +size 531772 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png index 208d81d9215442319dfb7e0364d43f32836564fd..d5b99ec3f6f9887933b667bd39052bf99486afff 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43222eab1d17657a720c138b765de78d92c7c1371a5175b04f9ee9f6be99dc55 -size 453548 +oid sha256:cd52b073a7b2e8b52e3d416e67e7b8f38b9f2760310452a29b36c01b0a2d07dc +size 530826 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png index ff0f76b65629e227dc0cd20b7c4196b7d5dbb6d9..eb74758b22e7c79f575a3cb343d346aa7fc97295 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4a091b82b8a58c809cb32432b705808e54d8f95888c0a742e75b27bff09ee37 -size 958371 +oid sha256:9605c6b41c38a3fc286922b09f85dc5de32075b3e077d0c71ad970f53977d45e +size 764369 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png index 53fcec26dd626a12dcea24f9a4bce73a388e212b..8d67939e304a1b6cfcd6af0d967a11a5562bfcb5 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95c047b3a9a268697ebfe7506339b67ab853c61c485ad81047261d2351100c34 -size 858314 +oid sha256:ecdec03d3714ce564bf9445becf4fbae62750e943a7729089abb8acfa9a2f289 +size 1196747 diff --git 
a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png index a8086fd2919cd4c44c66fe9364a8b5b4c03c717a..383f80d9f49e241588aab94a0c4b3d4dfc858488 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34b4b07ecc8055927ad9d7a4796a303eff3e8d4fd26f961a4e86afdf0d663a99 -size 915056 +oid sha256:1de58af9bcb60d898b27833ddf36b3db14e931f00e067d9cb5180133398963d2 +size 983454 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png index fed030f7a2a4557b31fe441bfca178f7030be62d..4e17d4f4a3a5befd26eb202343753f9cb89ef5f9 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bf350efc18b768994305ce0e4113b6e16d79d18977619d666943d996ad86052 -size 435724 +oid sha256:08fa81f0544f708fc000a4b4166ad0ae8638666bd0a90c5caabd9ce1f6a8dbaa +size 532277 diff --git a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png index 769fb80450aff65d67de1297e4d183dc4f25ba7f..dc439e4422cb2a154e67f9f4058360565555b9db 100644 --- a/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png +++ b/images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:feec4ea25575aea894f7c4c0eb175497e8cce476cd3e6531e9134eae4d93581d -size 922217 +oid sha256:b85a5d1eece5481cfb1ca625791978b049414b115400ec038f40685e34087cbe +size 528284 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png index 670d4536f3df5586653572dbc163fc7c065c7b98..5aec3b6901ea1b0daf59d68ce6abd688c1b21675 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66d54a672d34ddf5fe3567113b7df42cc699d8ffdf72dde1a1555346ade628a0 -size 1636410 +oid sha256:ff0ccbabc8d304a3610b4ac2cc168107d945338d2985e5a6b27fa6a1471d006d +size 1351792 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png index 95753a03b9f38eee5e818f833e8963140751436d..f058ad152afa1132487d367e0ea62f0482d27e4f 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84b4ae4c2f111c9ec438bd06ec990afd5c4648d8b773066b2e3a8089425a4d59 -size 1666804 +oid sha256:d68e4d05c83d36dc69b2de7be4b68387ba09343e0a6651aefd4f23e69edc5cf3 +size 2266370 diff --git 
a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png index b310d5498d1edcab053e9c8ffdee831903a0080d..75a3cee7b7a2eb1181adf3abe4cf420402433be3 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76a9eafe8a1d476a8ac8d55e635b5eba2735478f7b0c91fee5bcd8a79a21f278 -size 848523 +oid sha256:f12607ecf7476bba5f83688fb5b4f97dd008dd69c3d88106eee571d49e158555 +size 448284 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png index a2252258c9ec9fcd6f6f14dc8fb9cebcbccf6b61..c9fd2fd581dac47aad3ee0237a03b77ef25d5083 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33a9acef90d8d3de2d52078f73548dbdfc6295ecef750a09a07d3f75ba3ab5dd -size 1497008 +oid sha256:fc2d5e70427148bde0ed799b789a0935cacca00661b9f9ec1e537489883201a2 +size 1129866 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png index b1217e8122ce5a29c4787a18000c33c7aec9517b..3a81403868153851c6d572b5163619d33b115f11 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bade09655ef5219034f5646ac66c8fa160b6434467ce3ee54c6d4ae60592587 -size 2652435 +oid sha256:8eecf84097eaf4b70e9c1b94d34875af952ff7b2ec618292fcf7020cfbe35324 +size 1590058 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png index de071a03874cadbc378e0d41d3260f9c31cbb39d..7b8cab2169c3cce35c0dc40c97a219fcf5c94421 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1815402abae8a31d57669b7143a9546e4acfcec4ef009e48144bdfbe3542d06 -size 1633226 +oid sha256:3dfc5a2b3d4fe070cd57015139cffa63560644bb69957aba30efc67449e1d5a5 +size 2222587 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png index a33a5e1b7397eaadcb2cd07e18023bbfff9ef357..6677cc64bc0ba426043267b91da90a79d952e936 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f126ef5e5f92c3875879f35077dab85c4df02a6aff2471060c24abea0138d4b7 -size 1658110 +oid sha256:6caed5d93e1dae1179b8b59aa9cbc4ea1c48200e3536dd775e7112256c739d0f +size 2665453 diff --git 
a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png index 8f781f5cc11476072a45ab1c280c22aaecf79a3a..0cb14cf5e81d804dd92b3a491e3b8ff6571b4be0 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2f0eecace49e29aab5ebfab20b38b302da94a01e4178fc4945f3082aece4a6a -size 816038 +oid sha256:d7d0d31fb884fce74dda33706079def3b657c797444e741ddf3a7736d92eb249 +size 316228 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png index 811d7745a04ea405fd43f677cd2bcc3c7cd9afa6..698fa4840b17a4c421992bc1165ac3d51fbc3b61 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbd7bf0e442c5f3ca9191914ff525d13da50774f14976a17c2c598113a438b20 -size 1635935 +oid sha256:53543694771eb68397d850c394266b882f36217a012c6ef945186db7531b62e3 +size 1065101 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png index 927823859d9f156d50d2b2c9a97d17235966d9f9..092bea9b068f08670906eb671318961f17ea5f4c 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9e542333c1975e67e107359d5aaf35fd48dc37d6d6bc4768e1624b6dbf1505b -size 2113977 +oid sha256:01a796d409c73f0baa817846a1f4e0722c13e61b0422715ea313c5050d8853fc +size 1578851 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png index f1409fea7a99d9dfdb73069545cd305046b576fc..7433e817656f481cb5f806383ac03c63cd5665c2 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ff16731be9afea41ce359defd6aa7fb856dd2811686ac22529408d834f4640a -size 1489213 +oid sha256:f8d5a5a01bfd5eb56de3f8899fdf6c09bbb7d536720c1b9749fdc6db1943eb84 +size 2259886 diff --git a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png index 558f97fec2556649cabe69b030dc41fbc0c215f6..62d538de9ba247dd10464a567273614c932f787f 100644 --- a/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png +++ b/images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e93e9518138d74380206b5c8fc9e572c17a2103fef9d6ce2f812dfaecb5c1f1 -size 3332008 +oid sha256:370efd2270b278c33535ad6a525c3dafb4ec11116ed2b02a4f81957712bda791 +size 1619434 diff --git 
a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png index 89007ca730694d3cf8258945d4bfecc0a6a0d5d0..85f67ad2ed1de4d49f1bac46c186b14bf21788cc 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:148897a59e4668c5b4371f0b3f105410eec0609fa09a4bb642cb685dd00b5b84 -size 1418357 +oid sha256:91b2242f32a9fdca7743fd337630aeadd66f5bad9e11c949f30db4911b458323 +size 1003263 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png index 1256a3b33a09224f3c170af31e66ff5457433823..d5240c1bc2602528cf0f8891174f2cdf21aa0c0d 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfcf920b552298bc3c26b68152678d7182febf134c5ebf05453bf62485776cef -size 922560 +oid sha256:2d6bf994fdf96c88db21797d341026f3966a5cfada8c2a269535034c36735af6 +size 1117858 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png index c68fbcd74c6bd46d31d05c1945047235eaa01685..ef636d5f326573342248496690b42c4a2a87b68b 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2c7b4d898110de18cd911e09233f1c3d799de46e624c309f2792d14c253648a -size 1305878 +oid sha256:4b5f5943886b69694b7dbe000a26e2a7a3ae227c2f12521a61cfc3bbc8d1eb78 +size 1315071 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png index 1a5a904f1a302240e4a89eb731e661c042fc1d33..197d2c12041f4b7f9aaadae3a4e0518f2c7c337f 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0f0dda5f68b77fed86d024ecea0f7a1c74fbf93bd0e8d691f053b4855cb20ac -size 1224477 +oid sha256:142f00aadc3c331bd8c0b09601bf20204144e0e703a2cadb47c78360151251de +size 1217650 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png index c82015f0e486fad60388ba2c39bd52c6f2c8a0fd..0b1ac65deda630cafb054adeac3834e0d446538a 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f45a9f236d0bae9ff7c445503180905856411d73ed68124b749b649da0db760f -size 1228716 +oid sha256:75842fd3efaefd9c5df807db1bfcb9becced16ddade5960ffb56c451daebfd5e +size 918173 diff --git 
a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png index b82ac9277640be7e422a81dfbd84b3f58acb5b02..8843b32f2f4bb9932c1dbf8dfd158545fd4d70b1 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12aa5f8b0f0b85052abcb7b5cadfd567e3750370310a032fa0315d3088a78222 -size 1241982 +oid sha256:9bd00606f322061ea653395d6b9710587dc2dd752129af67757649d83699bed0 +size 1078323 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png index 8f7b98862aaa2de4a790ca61241de345acffaa4b..f2208f4fe9da42c3db6f84009e1118c931180986 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0f60b4088e744d61be4ba771c834654f2af0f96a09fd4000bdadbbd7ef99d83 -size 1262846 +oid sha256:8b8d863ef15a0f21f067752d676227dc4cbbb4535b0c0830be92822bd9dd545e +size 1101132 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png index db217dc8a2264b69d8ee63892776870684d984d3..7b99fa007732d94163af0b97ffc9ea5c7186b1f1 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b827e55d9e83d2180757c3b3c0f8c4d74bc9882662b4dbbb8d7ef757ffa29998 -size 2068701 +oid sha256:c802bb6bc1192f807985d6a8c419919dc6c833eec05ca511dfbca1b00389a415 +size 1370343 diff --git a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png index 6e346131416a226bd018b44ba3932bcaabb98f03..f26dedfe2f1cc7648c412ab1af03fcfce77d6a68 100644 --- a/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png +++ b/images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0129df333abe6062ae8915579eb3379ed98df47b0281be5dbfbddba4d7f99b29 -size 1645079 +oid sha256:a39887f603bceb522cf3bbb38a253244af8214d628e35696637d839d572f95cb +size 1649264 diff --git a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png index e46e14dab58b9dce35184750e84bbeb7faafbd73..d5e6fc8f405fb886732727354dbf2b918656a70b 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4478ce8347c805fc79720ea541122b4c4e24020ab9c27522bb477c572aa1f807 -size 452871 +oid sha256:d2414f55f339a971304fb1d6ce77a065b500854508e5b17627ae4a10408096a4 +size 624236 diff --git 
a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png index 4a2c014ee008c42c2e0bf27e013cdc11bf68e1fb..29f4f7cc0289162a71c91b1b754e0f7713f2ce35 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b756f638392fd3f3d96c8ca2571a615685ee8a51bf1baf2a3a022ed6f68f271a -size 955981 +oid sha256:954d1590adeb991e4c064ac4db1b4dd33c8406f65e98d504631da688cc2a462b +size 1954275 diff --git a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png index d585d915ba4810fb1cd6325865a492843dde9187..a895ae1aeafa771f284605fbedce2a730061cc2b 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec30cd98fdf59a47eb880a5dd1fc9cbcf988fc8704a0e17f6b8f6c456d408e5f -size 1595807 +oid sha256:8ef327ebddf41791b954cc36120fa87ee5bad53bcaa037da835541d916add3bf +size 1074782 diff --git a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png index 2af07e7af7d717140d5d7c78746e2260fc643617..769769c0490bf7101a777d7726bb6fee7104ce5f 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a1d775ea33d337e1baf2b7c6667db966268473e49386a63c1920a3a1d7098be -size 587073 +oid sha256:4a434b76a9cc34bf08a86e3653eaaf5428859ec6fffdcf5db225a644528173bd +size 741875 diff --git a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png index 8835fc28e2e1502b734b17859e8ba38253a41f0c..0eac3c51c5ba13e8be936835b9cfeac29e6d0325 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:314f9c95b0e6b099a5e0d52caaaf93367af1e6a7abebb24ab8c5b230804f441a -size 1152921 +oid sha256:158daa265810c887b206a455db99a51c184c83624e1813713987af65cfab851a +size 1123573 diff --git a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png index 29c35c84cb57d6e11e45d46f31d339cf1a21f2c5..13f9f0c39f978cf976ccf7083f534f1c74ecaaaa 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbfdfd70aabe5f41cc90a079a02c774a96dfb08ec5febaf5f51825e913555e8b -size 1182635 +oid sha256:08f11dd292d6e16a14fb292f9b1161c0979d46a473d3e2b7a0944ec0319afcba +size 1228806 diff --git 
a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png index 7b83fdff3af8cfe8910cb19a89741e43838aca8d..374d00666363fbce4920f3328a4b5fb89e6c3e55 100644 --- a/images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png +++ b/images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0b293f082e32891d50c078f9d30994c5e8ec46e95a344a22023d4a56446146e -size 748094 +oid sha256:e1585fd112f32f97bee98493d565a163bc9e3e6f701a9d6488629f46b17b9dab +size 1451869 diff --git a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png index 61f65caf0c141c37e0c383f4e5ec2fdf389ed7c6..374ebafd59a31f0d3bb2190440fe3da6207a3b69 100644 --- a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png +++ b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cba89cf9c5fa981a0bac0ba0aa47fa9dacd80e7c826d34aae310f743adb9616 -size 1390593 +oid sha256:94399039fe9d2285d254aa5db22111bcc177c3e32ab907c251740a04d8006f8e +size 676145 diff --git a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png index 96a8f7244c181f1daa6d9f37d1ef996e5e2724e2..7f024e6ff1d15e128a2794d823018d020f5e610b 100644 --- a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png +++ b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ce8921c0e189e629a239ce40859c99711403749517cc34b354f8418defa316f -size 1052534 +oid sha256:9fd43c79b4775c6f6fc5dae3938547f54c5dee5568077fb78ba922de0e19dbf0 +size 593272 diff --git a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png index 77a48083e1d7c29b42241644f83990b926711092..80d57fa28a2eeb5b95756c308b3d4ea59ed83c89 100644 --- a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png +++ b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8d6711da8b5eba6ce9243bf9d48b151f78bf3b12002128928d5f2857f299cb0 -size 911486 +oid sha256:af275f1114732435510e80e2471c308a8a4c6001c7a9607a6cda967cae578a06 +size 748529 diff --git a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png index 1bb3909dbbd7213e62ea1f140f7e454dda0066e8..53a1147b2d862344f40c6aac6041a07b5e132747 100644 --- a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png +++ b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad0f9c8373160476cb508c030145cb647ed5df629730a52ad5409397a24602ff -size 990928 +oid sha256:e92633c281a8b6c89f81c7636eb1e8d8a37011ff96b537af5f2f42beb63ef591 +size 633542 diff --git 
a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png index 84100fd45661d472aa8c700fd4a1474cc15a780b..0b250369ab581007328e5be905b06bc5a6db93a1 100644 --- a/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png +++ b/images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a16e16981f3dfd2bf29943ee7868d6f0e999b98da976c0a4d75e6ca52f113381 -size 887221 +oid sha256:f8b0032cac425293ebcd5a62526f6f78462263b6854235bc42fbbe7036a747d3 +size 1490611 diff --git a/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png b/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png index c439133b67508ff35f87dc86bd4f55852ec9281b..2c3e05ba77dd761fe63ec6f295b92abbf28ae17d 100644 --- a/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png +++ b/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e618e8ec1984af2627ddd1cb7605a0bf3a49ba3c241d00edf245782dd0ac463f -size 1717349 +oid sha256:758679bf6a84bb91ea16aa4b1f444df5fa94458bf3195d308345ad91eabee70c +size 1529486 diff --git a/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png b/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png index 5bcb3dda054d7761b6339ba4fe845136f259bda7..02106d44d6838bd5e6673ca630bdaa570e5fc07c 100644 --- a/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png +++ b/images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:142e6074e1ddca18c46ab0476314816a3766d69165a6e41701c98924028a9303 -size 786162 +oid sha256:297fc6dec07c82b57f3a035902fed3899b79526b7aac191c6cc8fbebca96d48b +size 3096281 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png index 4a7b474950b9ed58be0628e278bb005c7b1bd88e..f8f9079fdf6247c29983650d1b726bb1f01eae77 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a04b500842681823959b2aa8e5fb56727a56c03193008c42f702b60c7bab705a -size 2762904 +oid sha256:14bc6ad8a422bd4c7355b29d0ab3e064dfc9ec84ba9ac194c08c3596089dce26 +size 2501470 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png index 43c8595de424d2d20f03b17017c80a3e573b5ff9..afcf266a6eeb27b83024022d8dbcff5abeec6083 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d21a49d22cfb3aeb6ca45c5ff6764d913cf5e822689d8773938f8d48f943431a -size 2064507 +oid sha256:61c23b6e124b062c373d5a785326dc6ca50d366b02bc3ec5e7f5f1a9f14f7eb2 +size 2764646 diff --git 
a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png index 5117c7438670ab9885906a244641285bd15f1f36..8db967b4dc01252dee94c8796c80b35278501db3 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc810e0a98e239aa8b03696a782635bd5ba9931ade002d1eb928a632d191c5f4 -size 2241783 +oid sha256:53290041fd28f9c15eba5de62ac9e200374b4a921593fc8dfef7e99716b019d6 +size 1333348 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png index cb2b28eefe3806f0c4894106292c9a2337dd6ff1..19e2cd50662ddbf75e44b6452964d779c1d7bd14 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdbe3960e53330ff846f5f3ddb73d9ca53a80f393b01b7b1aaa94cf770faaeee -size 1627618 +oid sha256:7e8d281bb1c4a18a265096edce1bbc036b34980d2bf150a4eb2b85bb0349fca7 +size 2118859 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png index 0116e6256080c0fcc6052a0add063820477a68b5..8f52e8596e3e94e0a40f03f77d0d803671f43b10 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b3e5d7b2afc0136d804e8912d457ef3357a3fe33e4e53dfb2e319e7173620d0 -size 2062158 +oid sha256:aead4ac61eba6906f85038161fd1de0f4203a4e0effbe88634259d6f39eba099 +size 2735364 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png index 4cdbc0cea87d6d5dfcd8a20d1aa708b7d0fed868..45a6ae4b67de6a159313dd2541ec415392d47868 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ad2aea35f4f9ca70e90c30388f07af4f1a1c4a7e591ebeb4737652e26f035bb -size 2030358 +oid sha256:0d568b05dd8ee5ddb9a247998f1b45134586cc47734524cd32e18a436debaab1 +size 1797943 diff --git a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png index 6f50aad95adc726ebbc224d29b7ec6120e04c75c..95bde1758746ec8408c333d6badf7445f438d543 100644 --- a/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png +++ b/images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18f528450d5ab2cce823c2d67d34bc4adbca4f543ee49caea8f997fc053fb517 -size 1627034 +oid sha256:d761147a438c09797365deb5a8ffe124939ae2dd05bb16461a7ae5d58f7bd0f4 +size 2575762 diff --git 
a/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png b/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png index 825c35a74197f7b15b2be188214e89c20d937b91..1210e03ee5190d333827445725f31841ec85e2a9 100644 --- a/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png +++ b/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f04683bc03b7f5f3d198ac3de5343c7374dcdbbd2d028e8f8d97fb30679096e4 -size 1140755 +oid sha256:cc28cd2412cab6a1bc56108b13def46f219d427be133ca0e8eac077706b1b9eb +size 1511371 diff --git a/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png b/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png index 73146884c61edf440900c8ae40aec13feff4b0b3..8e4510f25e6a26ddb25dd2e7b948a3e7c6a7495b 100644 --- a/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png +++ b/images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc41cdc04e22dd346ea5a5d797418045ae942f7671993988248be99537192f5d -size 1435465 +oid sha256:b3fa305eeb901bac0655e4fa033ba3981b1f0db6fc6bb84f7842d668b2283cd4 +size 1462875 diff --git a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png index 8911830e013b9536e32c8e40dd5f8e2ae73abe89..37897a3dc4fc04a1fde83a092959a097ef566835 100644 --- a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png +++ b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eee33682ad8c3de02088c1d6adb187246d6003250acff9b7cc928b9d412b84b3 -size 1426349 +oid sha256:def9a2fa41f9fe69dd2ef21738ab573f6e9b8a88086ac5e5f5b441f23dd32af1 +size 1444181 diff --git a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png index 65dab40e081b8a9a63c68dcb735ac1996187a803..52b531a576f5ae5b2722131524cfad515d7ef6ce 100644 --- a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png +++ b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:890cd422afd0de4e49cf4d480878d9ce62d9e916c023c475b100252c07600198 -size 595155 +oid sha256:729202b13d5f5766f995cf580541c8475bb50d11cf4853ad2490a220289de665 +size 685272 diff --git a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png index 8ab81adc5c61f90a6e0435ac6ab5f5365759bc19..eb3281bcec2bfe1e2dce0918859f8ad8d7fcb3b3 100644 --- a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png +++ b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76d042b3477646b66e309dd6fc90632989c9a63bdac36f125b4668e707053b78 -size 1560825 +oid sha256:e5e33b0d56941199dabb8e47c228b7d9a03d95c1d4682b7a279ceaedc4a948eb +size 969976 diff --git 
a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png index cfb6e7196e0c0fb6f72167b280d32713d0cc57dc..ea64eea5911c6b90c7b3d25ea16e4ebe2d1c9ed7 100644 --- a/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png +++ b/images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b828ae5f88fb9f952e100a70725557a639f23e3cd43b4876e9d25e47474a0d4 -size 856604 +oid sha256:6b5672dad4f04908eb2667f401fa0b746b4ceee61c21ac888de2bb764ecf173a +size 629006 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png index 900d80d0d9c19b4917423bb1caf7c2bd7ca8ecbe..9499792ec7c0932611025c627fe3f8e9a1a03f0b 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c6501239816391f3b68f16193d294909e3156a83662cba8e30692b9f5671408 -size 1119398 +oid sha256:d7e1a626a566fa928a2e38df5a335cdd71656c766d2724374ed812c251ec3d25 +size 838142 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png index 39fd021a75625e714933a6fade07ee380e233878..251c832b4ed8e07ffa9c36d9aa6533bfe9acb1b8 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:489d6f57c58164d14ef690f0fe500d1e409c5176c8a0ee45aef4b2862033d346 -size 2523658 +oid sha256:58c2295d80f6eb63c7917625eb79fbb61957f6ccfd8c13272ac4a602b16eb7ca +size 1849485 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png index 48daa2784ad62014c5d527f39cebf338c1de5374..4a0a7d2b418f3a3d811781787d3f2a3e7fd708f8 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:358ec1488b7f530f739ae05deca715965661478e2da6da4dc946f4edd17e0b78 -size 1313110 +oid sha256:83a4365d638f82620f66624ecd57470fe658847bf2b61b2f430841e66bdfdf06 +size 1313029 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png index 0f442613409989a06a807689ebb3fb153ae42f0d..a156f8b63f3dccba40969eb7582d40ba51290062 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07f913beccda625ad6fd175731b9a86f71480765b78252167b13e7044a570dc2 -size 1324356 +oid sha256:0f688acd0931752e272dddc31e9f68af216187e1059b68b5cf29b80318c801fd +size 1116754 diff --git 
a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png index 94260340cc8bd76950af1fecb397e64518b97056..e510ebe65b9b42776191ea6e2872e57be457bc78 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfb0c53bf02456c88493aa8e8669df0d4da41b4d64e23c5ee9f9ae1361627df3 -size 2123906 +oid sha256:d943cd6843510a9c29bc0d25edcae5bd6188a5911f73d7b25f52ae81ec640c0b +size 2195873 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png index 0476ad1bdadc0a72adbb18b9a95b0b9cece249ee..ba195a33b295a6b7a2b28efa45004666ceb1d3f6 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c3b35662d0e11abe6cc45e77b39b20057fa123da8e6156d45ec91235e7d03df -size 1309195 +oid sha256:125fc497fd579080f56c1573de8273c3a47c0fe6a93ae0a6aa4d16cb4e484f92 +size 1207279 diff --git a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png index 79231689278f0fe9a122ac66cc3ba7cf9886c8df..c8cd193c851ef1853f76875d590567f454530218 100644 --- a/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png +++ b/images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc029154a1091172f0c371986bd2543f6267e47a78258103c4c7f3b3c60e6790 -size 1324275 +oid sha256:f8671a536c7377814a9f52d7d50d931112c7a1f83513707e2c671d589eddc53a +size 1584644 diff --git a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png index ff76f0120d7da43ef34fc4892f2a3a202be02de2..e188a1fc34840f2a102d5ff2b16687306c19f87b 100644 --- a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png +++ b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3571673567eabe142525483b04c658cc4ca8fdc2923fda0c0bd16f89fbd2ea5c -size 1067487 +oid sha256:f83005fca7db606788d3a8b6942fe3489bfa04ff0bd761a65e7a3e448ea5a6e5 +size 978622 diff --git a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png index 947af0596aaccd99bf490dc01f0df4d9bbb92027..eedacb1cbeb3e4ffcc37011cdd96ef8e3068b24e 100644 --- a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png +++ b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:626675ea8490dbd94cf3b7715ab4c7d1fe726e3575e46b810859483b1c58ec05 -size 661429 +oid sha256:6be01a586569dcc044b34261bb024a981ebdb8a410e00d6d5fd00e37bcc2c794 +size 711345 diff --git 
a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png index 04ff0f429015431b2a4fc0575c6510fc9225df1d..4cf2e6beb42c3055f6456cb637ef8021c6589b61 100644 --- a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png +++ b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f66dd56df81653d5539c95a0083626dc246df59c96a98ce55ab934d314b1a699 -size 1069696 +oid sha256:76548c4e255f3e1cd648af7d5b32da8074b6908f2e5926030097022cff37d6cd +size 982211 diff --git a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png index 656169279d80ca518aa61782c83fad61aaafd90f..38890d383157366c771fd67c3410cf6628ef0811 100644 --- a/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png +++ b/images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ffff5d3f74ba91e53790a4eb126f2237684f8b2c27658dbc8157e9c793a80d2 -size 639133 +oid sha256:2d2e09220c280564cce3a4f44d9bce38889509c8d5a59768abc493067a19deb7 +size 638692 diff --git a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png index 4f1cd417e171f9652aefffb74142cc6f7cb1e628..3e876f7b8604b203cc54fbc5b340ad1124ff3c2d 100644 --- a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png +++ b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58dc388bc22dea200a804ad6ec2596e1274fd83beb51f4e2f982f9b6e89d54b5 -size 463597 +oid sha256:dda994d8d515eb5720e91143ba8127162193021394c9f37a7b7f7655c1e64433 +size 437126 diff --git a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png index 6d4a30055b2497f4d1b3fdfb1fd4841d23db35c5..8f0ed67027c7881d5b1c6239de1ccec87f945a06 100644 --- a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png +++ b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf156cf9d9587104f21726a5fd2b60a32f49e65adcf7e78ca9326652b3bb98db -size 369578 +oid sha256:2a3a9b65b7b7fcdac4dc060ebbc1111cdbdc60482bc9696825d3aff3ff0297b4 +size 402796 diff --git a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png index 17dbb864beeaf69d3791202858a83648fe34d22e..64f27bf68193da05e1dc7bc91da102e12db103f6 100644 --- a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png +++ b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:445330e061652893bbe326ff6364b434c39bc44d6d0831d91967c2398ad957cc -size 401071 +oid sha256:119a60a92998713e712ed608dbfe801b46e3d27b91dd1249a5e57e4dc2371565 +size 399796 diff --git 
a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png index cb7f068d5629677dba98c1dfb3dd0dd6e6deae19..278684557a264fb9304d822461ffbf9fbdf44b2a 100644 --- a/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png +++ b/images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57f3ff9065de5094b50e3c7b2ae672b8e22782651a1a65e52b4bc8249a82da2a -size 447073 +oid sha256:0683a060ae21e2d25ac1f972cf1d096d0e8da1c65ba0b1a6bc284634397936e4 +size 325569 diff --git a/images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png b/images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png index fb8277f82f91f77f86ea74e40f7eef72d5cb427b..57b0f76150c60cca0150b6913e89d235c846ddca 100644 --- a/images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png +++ b/images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3827810675d695689d9480298ec12c9c965cf6c91f1ce5b6f3d6b31897a3aec2 -size 796262 +oid sha256:991d8e6943d16f14fe330a8386079804129128f09133dfa1a54077a41e34b5e1 +size 826075 diff --git a/images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png b/images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png index 47c208044cb641cab9eda4dbf95f7644ca265e0c..e235fcba30b176f4b77b09511a7ec242788190cc 100644 --- a/images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png +++ b/images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5c543f4e332589b666bf9cf1e1a958c3ed1bb85660ea79230bf2203440d4474 -size 2494574 +oid sha256:680f37a5fb68cec26ca5790681fdde1edb995da2f5503eb421c0e408cd430bec +size 2495407 diff --git a/images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png b/images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png index 6cad7de5bc991f6f98fb67448ba695e589d83fef..8b03b2d386bfecb8eb8ce29e222022952ddcbfa0 100644 --- a/images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png +++ b/images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da403763c39851fe351aac9dc631d5fa5fb1db424dc8120ef2d7930af0ff888f -size 2522610 +oid sha256:11dc6c1fc4468acc9fc5fc52f0e91aafdc12e7b4d7c288eb04bd9c49f61abe34 +size 2000835 diff --git a/images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png b/images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png index ce62a4eaab37f0b7dd3c3269085053b1f376a74d..2cfb8ff92fedd54481145e3c53c757f95c6471b5 100644 --- a/images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png +++ b/images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4202181719fdbfcd223ab720e153ccec4c98c8f326b84e10f275262ea12469cd -size 3401278 +oid sha256:67bd44d90dd03a24b1d7dfdc4397e5af9019bc40b4eeb8e5e5448f3e4861b5f4 +size 826117 diff --git 
a/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png b/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png index f701c03d3af9f0fc7d140a5f74b619c73f580686..a4dc71cbeeee5222828bbdd6794f6171cde9f2fc 100644 --- a/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png +++ b/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37206e8badadc3a6a299430b4aad81084d7aa568bdba3ff7a5bd3e0c169ba1cc -size 1438552 +oid sha256:0e59bd1e821f12212816f1a33e2714e99bbec7dc259fc2418021c598957d1f26 +size 626371 diff --git a/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png b/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png index 89e84e7c5cdd11b6e824aca03a595c572ba0e038..bcf2806006778d75aa6c5989c3783931d724785a 100644 --- a/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png +++ b/images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfb668b62a5b51ed8f42ee0fe91125e299b8373af9cb1e4c83f7a4817ae2535e -size 878719 +oid sha256:59871288feceb9da761d03e6b902919b35c3e1ece0ea9578ec4e05757460c4b3 +size 1472061 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png index e139bade40ea17de4e787c2213cb3664fa5bec81..4c4e539d248072806b0db32718c7768ee12bbf89 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50122e9ada9f2db352de0495b5ee18aba77ebacc53312a2ad62561d5d34e1524 -size 1339787 +oid sha256:bcac561cc2c0ad3fd41c06512f5f2178d99839fcf5d9aef9606ab5b2173f9213 +size 1916576 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png index ba6cc41a1a55edfcefe9276f97cf8e76c3b41b41..c5d1a42dfe92910bb292800ef64c6832e5d74df0 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af95bbffcd70b513e05efe54b82894afb0f4de4e239d26e24590cf94eea438ae -size 417853 +oid sha256:370da5cc7b0a291952fac3b79ea21cedb979aae6fab929003fa729e401addfe6 +size 310742 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png index ebd8d5ae9172d4c2fd3ef1e6645f7a811e20f938..48cb63358c71fe3b866dfe3a259613e6c0185841 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5eb2cf5a2689eae1ae87332b582cfb1fb0969dfdd33ac6b7d4b1a0647ddc6373 -size 127165 +oid sha256:7ce9b5ca97d8b0de751ae4e3b8f522081b1b38cbdaaa4e94a0184e7c801a9841 +size 126548 diff --git 
a/images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png index be3d9cd92d9f2be17100f433d834fd08f4fe097b..98fbb4c154c4aab6f7c5287f663c60d628bc161a 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cbdb75fec9feb784a4a51c4970b1a0df9a7b97dfa08133de1d025098fcb7d3d -size 411377 +oid sha256:6a88ae6b22ee33ca980060efbd7c53e23eb05020e02dd6ee208f4239486c6285 +size 358339 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png index 4b3461463bd68e29d4ae90c5ec311432be87b97f..2baa97660394d1c3cca9f67e7ebee82316ac105c 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7a4e65a7f5d9b7443ce494fd35c41b344520d9da05ddf71c0f3fada016356de -size 139244 +oid sha256:a83c604096dc1c938119b2a00995ce4adc2c309abd92d0dc3c8099ef988ddcdc +size 236033 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png index 3cfbd9b06f105fafae2a51749729ad1be6509756..3de32b4ab6bedfe623aefcf323b7c51ca56aff27 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3554acb46cf1e001af8c463b9e69c806440b60d175bb02c1ec3df23cb7be0f24 -size 141362 +oid sha256:fcda698b87643e7cc03a433e5d78203c90f828164ddc8a1a9054554274bc0a4b +size 418870 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png index cde95454fe61a4a3a0dffc73fb17a9a3c87e02c8..d939ace6a4f67d79deedcd9105f3fab0455e053d 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeb1796c1e00022c196f9e5e45a59270a9bd656a640f07f289ddc1c061233773 -size 454672 +oid sha256:4144fa71b9de5c83c79243dedb974514efbf016a209d2f127113236a3a7b869a +size 606426 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png index 60d4a21c88406f11d03b52b52ceeba0b5d18a55e..a54bff4f4fa324e9c989dda110c6ccaa03ee7d0e 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d75cc51f64ad19a77519a7c73eaa6dd647ea006bf14676bb0d89cc07118f353 -size 129056 +oid sha256:bd8df83899d1215182b8c9322c03ac9ba084b2000ac937607adbea8349137075 +size 406407 diff --git 
a/images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png index 9f40bedfbcf1b9806c198f9f4d61695f7f16d7f6..bfd52101cdbfea1b93e4572090ca91367b631c2c 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4be190df65b061192ba4da30bb4ea1abe9fc8c5f2758336035b6e29ec21f54a -size 126775 +oid sha256:777ea472992f9ee173a64a08b8474b9ab40dacf5ec3b0c72847af407c56d59a4 +size 284139 diff --git a/images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png b/images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png index 5ce36ec0e9b2810f293815b84263ebd56161c478..763f4061068c0c0e48a799d2e6d6b5a65b800ccc 100644 --- a/images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png +++ b/images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d48f3f7ec32e83bddad9bc30c91a435c24f38537ada6ae553986ae4b5a5b678 -size 1143874 +oid sha256:be8bbfe6ea02c8ff554c2bd0319d25f37a44ccfeed09ac05127fc69e4438e984 +size 822326 diff --git a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png index 25577c8377cdd996a274a868be3721e07d3b382c..d9d5564b0821b91b3e5554e56eff2311ac295ff0 100644 --- a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png +++ b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:702e087a5acc7405178115b1cb7494f8b07c378e32670c8467968a0d294f189d -size 1048666 +oid sha256:d5a8135de07e0b36456498f19022de50dacdc13e636f4c1ee90516a60af4d23d +size 1046091 diff --git a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png index 852547352bfc96113fff6fc6992421fd89bfcab1..36f5c2452749562ed54bf2e8669eb1230d59e557 100644 --- a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png +++ b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de68ea4a180224b2bb280ceba58ad9a1fa6e383382e970cc77c8af8d87b985e9 -size 1823597 +oid sha256:abc2d7d50d70f0845664b2b66a1708829b1148482bc71b3fa666972ae5f755a5 +size 879360 diff --git a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png index 433f7f5a0532f404b03fc4edf2a1945c46d81d86..2eadc21ba609a3f8360dbb2631e72601adc5a3d7 100644 --- a/images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png +++ b/images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0b96c5e2d3be5f3409a90d27d95bfcc51385fa25da320ef43577206362a9c11 -size 923519 +oid sha256:da790b63684ea6b0c7d2bc08b193ab86267bca96577e7ddcc2494dab133eaa55 +size 1015088 diff --git 
a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png index 80ac906947f1b21014b93ca9c11357ab96db58d3..5ef70536ce29fd1dc67ac0b12a6291ecbe77b808 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99970b6e6b1fc5a192770382fa895fc4bdbc8873168148a50fc15105eea2f2fd -size 1024906 +oid sha256:d006a1e5d2d123cf1c60e2cbb893ad29e43785b109f64d2d05281e07c57b43fd +size 1650897 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png index 95c77b10b4c0052ed22391a955822d33d1d47cac..5e16b413b9210a01dca0cb24cba0ba7560d081fc 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7890305803709722fcd03bb2946aac2afb0f99e3bc23ff75dbc52b4d9b1a9589 -size 1023453 +oid sha256:901a8c493e6fdb9fb1fdffc81baa8c7fc510ce0e589f34bff2a02d1b51a91102 +size 1961906 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png index fefbd87dd30db84f32157f36f31ed9bf3880e8fc..7fa3a4b4d42a46fd5859af93bfde595e32a277fe 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9394a960c98c7f26d4e36bc7258242d52e3684587453f33182ba17230aab9e4 -size 1029646 +oid sha256:cf77d0b21c390fdc7c8bb73141a8bb2c28f18b24ac66e22aa38ef2151531fddc +size 640111 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png index 224836e582026bd991017fd9218b9eac81c26a60..7385de62959b955dfd95599c1cc6b8fdc37f8253 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1906af0fe93029826afe846e778e681ecfbb1a6d590f073cec5aa2bb5b1b971b -size 991273 +oid sha256:3952363449370e8368dd33a2ab70642a501346ba042f1d05cceb6d3a00626311 +size 1145587 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png index 15611466fff56a1f749e943c699a2e87ddd58332..931b7e19488d89a31c4183549af59ec1cf053b55 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8926e7c7dd1c721d220d4f5e779d86e2bc8e455c7437b2914acbc709ec42376d -size 1033050 +oid sha256:0bfa79286c051ec331abc2e77f50d5d1ce96dee1518a19b46becad1d68993c11 +size 193289 diff --git 
a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png index e59a72e51c7edd5914c4853c8654c593e7c2ce72..b1cc9351c6361647024cd15ad4a38a5df5081e8f 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72aede7eb011a55dd1f0d680ec54af76d888c514860dbecba0abf38a358c22ce -size 1021603 +oid sha256:490f1af391b75a0c6e3e4780e5a79429561154e890dfb7ce50cc47071d08232a +size 1548842 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png index 4a61df9556301b0c63cef3bfbd188f0510517b01..a4bbf6b3a5b180b68ec4395f3445f335446b0036 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b96cee6661b2f2289c7feb0dbd8ec0cd659d2bb18fd99bdf1fac4d4701c738a0 -size 1077347 +oid sha256:e5ead8f9f26bf4a34dae77ecd4f1540320d092a8d81c3488910a9e96e1932ed0 +size 888371 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png index bd266484280d83d110ed9c0640509b9fbdab2131..b692ac5518e3d5b1f6a08d3a39bb189ed6582e82 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d092a0d997ca3aed632173de1dca2a26ef9139aa1c69b4c7288f55d4c1c0a9f -size 975475 +oid sha256:a3f2d986c7b4dc967d18682fc05e9c6922e92bcd83a2d4994d3555011631a400 +size 1913070 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png index 197e4afdc2a31a5ef1467ba75238b6d1e04dcb3f..78fe899e8376e90a54974fec835cd833191c325e 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9726baf6f2b08580e1b8849de9b40a87b21de4a1569e5877598498cdb4d4e95 -size 1024863 +oid sha256:2b90c43b37d64aefa517d82bfb926c5a95cfd44bc9125b4314de62ba998cec2b +size 1347484 diff --git a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png index 8984a84549c8c9103a4b34af845c7d9fbe3b668a..41fb260dee164dec45b02cf8cae1c9b80a06a807 100644 --- a/images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png +++ b/images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be3e7b36b4862fcf800407fc07e708ef8a0852c0a15a2c99168823eee53ecc27 -size 1433434 +oid sha256:e2a02edf59b0f9bf4142ef13506c1d6c36ce71824e87180b23e1059c10429078 +size 1258802 diff --git 
a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png index bda71a03a6074f337f1fd99e9b2b989b8326ae01..fee926ffb975da079ea054f81c0ac5fc60e1923d 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:891d2dec6e6bd2415aa389f1fc4c344458db700081291c76f0e72c4094a6a153 -size 1170308 +oid sha256:26472af55674ce50592eab725f1dd08142eaed97aafb2e13a9ff8aa4992f817e +size 954219 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..1e5e3b7bb8681cb1bba447c276b9858442577346 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:29562ca2d45403c216e5836ba7db19f3cc910b4ac9dc562709f3024dbaf1bd4e +size 2110850 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png index 8ad43edb3f4da02a8c362bb73023e2c53459d7e4..0cbf0efbea50001c46c8f09c54ffa26a505f5a50 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb8aa4cc553a335da42d2922543ec5a2ae5fafbf31a10b87180f5f9fb0919905 -size 1948283 +oid sha256:6442845fbb6766d496d1be43323aeba3552ebd59778b399c1bc1d3d47c5ebca8 +size 892478 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png index 990a4ff472dbe40e9382d805204dcd2c9fbf0083..4f77952030112861c18e00611d1921fe5497d8eb 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a1bac4f76a95d4e6aebdf6e1c40b9b8a1d21307149607b9f48dfbe0dfa39f12 -size 1988704 +oid sha256:fa1b86e63828e8a65120d87823476670c2f86681634f41ab6d1e548ac724287e +size 854386 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png index 8aa562cd9aedc8c262f579db4a116d403577ef65..97954438e72aee88d6acfb9db25f6d91a173232e 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38c17b573a5486945f44c8df7952a097d3a1dd72c1e628daea1fb1476e5625a3 -size 1713305 +oid sha256:4197c65d8b4dbebed4626ded121f9a111c262972d1de5bc8e16a58c7b44abba6 +size 1523904 diff --git 
a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png index cc94fd8ed8dd338bf7056d34e86e09e6d3ff62bf..791656f9a0a01fbc52d133ff0cfdc09aecf44f7e 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f5cfe1c7ff2e2a4293e16fd8f2c94d5efbad3c8f7e0eb3be4f98b27db0f7c3f -size 1930909 +oid sha256:042c07c6237da6126f6cb615f731912a006c44ec2dc12001a4b284f0e8b4ab15 +size 817425 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png index 0fc677ec01da6818c2cbd0349545226a5655d7c6..cee502b8e18c222fb2f5fc0a66ff5049a9789dae 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce2789d63a5b31303731821987767040d71679f0c040e2b29514fad51f1d1728 -size 1071397 +oid sha256:214798f93461f60e0cf159164ee45c3c4b54846213951715d600b6020527b9ba +size 1322458 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png index 330bccae3522ab502a69d2a462ead92978d0e3c3..9cc4f1177ff10efd03dea0e803ad3c6670447606 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2560f68245696636559f1a76b72875bab5faa449d6d8a462534a8a66fa57ea3b -size 1704251 +oid sha256:a5633d76c59ab4d876b8db4f754fbb8601c2dfd3ce8c906f7fa32ebe5023ad58 +size 2109456 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..cfd4365da30dea92c4531f06ccb43111988945aa 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:eda9586817c4681fa3ab34cbf44a49bc5d5fd0d8bcda8a02308b7a73aee833e8 +size 2114770 diff --git a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png index d87ef4d2243f07e76bb710c92eef42192e960de6..1eaa1264901eb3abd6e08cd779372ad33a77e058 100644 --- a/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png +++ b/images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a51c3289668a76060d60d7e946a135effc9b2e4abeff850e55154e147de78f8 -size 2040369 +oid sha256:655ab7a3d73100ba7615d3d02288a44b4db721182adc60d6adbdc8190ab3a672 +size 1809886 diff --git 
a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png index 0bb33238a6bcb6fc0dae14bf587f28a599e0b6b3..1fc1355bfa3a3bfdbef6eac7b1a6d0c0eb9d972c 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35d00f472870a4690c4474b812bffcee06b01a9790bc8c988502e2248d2ec36d -size 1058929 +oid sha256:e3e4a7aaa70fd13c44c4135b4849b5590b67cc36c86e7e0644fd822c1e34488b +size 1125776 diff --git a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png index 6b87c401cc6ce536cae0d4b26b8d9c5d35c8988a..6028a68729ed3164ea11a57eaa4408a1b374ce9a 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d555f54078b95919b822ec80d993280d35b71912672dc18c5beae2debbcbe74 -size 461501 +oid sha256:23f5e1ef4b2723a39d396fa8972cbd069e359e0dc67a14a9c5392ca1682be6d0 +size 848878 diff --git a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png index de0754b0d83576a74de894b24706be413d0af4a2..ae5fbd4e817ed6ce7eea53632e309fbb9ee59e6d 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34027260ba3eab6d8af737baf52f485f86f1c0bd3c95e5916afe553b63346363 -size 1697767 +oid sha256:24b294b73928e9506ba8cbbf82ad1092ef4fe88f8f48a66769f60eea554c80e9 +size 1565688 diff --git a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png index 6b832a636de987981f88eed29d70e5e7b01dfd6b..c2b05d787d35bada3d555482c8e88a4cdf5d213a 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95ec709d0c32c0f0e4c5d35f3e0cf249fdff9ed84bcd4834c7a5ced00724ecfe -size 1804285 +oid sha256:e82e25b7c11bd86fdc62c05cb4e19beaa8d6000430b7d7562dddebca4136fa2a +size 1697610 diff --git a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png index 6a50e5fe57a4f5391033ffa08912d68ccbe0da48..528e09d0797ad406fadfcbd1beebae6a7391dd5c 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8c148e3ce9fe79e3054bc207a3c93c4d2d4bce777f9e7cb9080a3cf96e94636 -size 902238 +oid sha256:95106bfc9f27db5b77d2306f075ef7eb0dfb7f052131b61bf61de8bfa9a873ed +size 559301 diff --git 
a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png index 5e060c05877d1518a362c68ef871993c8e0268b1..54d058259aa26774419bb03c0ac9cbae88cea5ba 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7b4bd02aad2e79ca60b7756fcaa4a60c4ed8ec84a9bb92ea36168338cca2c54 -size 877775 +oid sha256:89db788a52983fc8b2fb40f1fb38afd6396a98de374a2e2fc1d2df4a1944b7e8 +size 621716 diff --git a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png index c13f070dfb9aef8a682a6f6c0ddadf818b50c065..b465d4366a50a7e005c9b352fe396176cb774bd9 100644 --- a/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png +++ b/images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ff4d415f46b0b12b24cd42a2d4d7924449f24ab63093486de463f4a8e3cb1bf -size 508953 +oid sha256:13bfe5c2e2282c2123639eff58b86a53f3fd98c34c5b927a8947894a1c59a2fb +size 708903 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png b/images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png index 726cb498726b27af07f9df5e5246f0206107d26a..884716e4f44dc800e6088ec1736bba0f22f1e813 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a26d2048bbe587122a50396ab3e307b466dc7856b64da94bd29d32d14e0d4250 -size 1666679 +oid sha256:4228b94f20babea416d29e4339053db2f7a4810314ad12e5558d557a7c88cf44 +size 1649970 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png b/images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png index 73b0c827a774d3a6777db46a60db2eab8b625027..a29be1592770beda12da408daa4ac03f3f57ad92 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c18f3d41afff9a1cd2cd2a8f87ec425793194dd5df83189662c10f1c51f3603 -size 1650867 +oid sha256:9882604bce803b6e8ca343966d42d91624f553680003799d6599260831f478a7 +size 746954 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png b/images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png index ba9c76ecbe29d2e611de2dd250c008b555aca0b9..1444915ddddedde490aa0b657d080e84fd5aa80a 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:339c995be82acc8beb52621626fcb849d881e264cf0702c8bc7615641085eb9c -size 1317228 +oid sha256:382a6e6c7f875f560a1ca4411c1a60b95a0c5e2f64abd4c03cf45a03f665c56c +size 953909 diff --git 
a/images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png b/images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png index 5c443db2a37e9c120375c5aed0b98a7bfda0fbe1..78c7e3b4c73a79e6b0c15d036b86cce098524e10 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa5f934d03c4f131532721091c703b554bfbdc9c331524b479db1717de384d09 -size 220005 +oid sha256:3135ebd0672fb3065020e0ee70a1698819afb7a82db3c7f438af3b9eff39b383 +size 219873 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png b/images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png index 968874944ae0abab6dfcec878be073676d513dae..3c5ee9d6748020310c0450e908381ce5b99d9f54 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c230972ec3a6d725eaf480f68115a38490be70722a3ad5c4833673b2db1fcc2 -size 192078 +oid sha256:c1b9964f0b13c696d11ae9536c5156a1093f57f6d4b99b125589aeafbfcbdff0 +size 170721 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png b/images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png index 74a99e53b01ca3eaceadf162feaf3d2f61761b86..fad67aa4539a766d30b20532abb5266a39bc4e33 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cdd374b81db6f084b238caf48c8300bc69d557c5a1b126d1a387b9f2a483f8a -size 1257764 +oid sha256:412e0a8063928afab9b59dceea8dfed7567e7d82fd7345c6681bebe35c48d4a8 +size 1054499 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png b/images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png index 01a61a8fd8a1b7c4be37b9a9cf44890537006e58..1b5b7c39491743fe4d2dff05ce42b720ee3569c2 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd2c866f036cc92289b91786626041a774fcdf7040638d82b1f18e9188b396b6 -size 1326831 +oid sha256:a07a15fb2d8ed78feb44932965862ff3efe884c20ba4a8524eee64515c761b77 +size 861060 diff --git a/images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png b/images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png index a8b5ea04e69be40f11117a420550c4604c283b9d..4e572ce81b0e04ba4895105d229d675e70a97887 100644 --- a/images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png +++ b/images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:082f67fd1b457f781e1c22d5a03bad58fde2227331b5366e8847d4d0b4fccabf -size 1673777 +oid sha256:e66bc43f879dc46d5a0cf45b2a5b781085a9d10895d94423d4281a3d95fa0afd +size 1509996 diff --git 
a/images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png index 9208a9cc7aa440c70e731778166619e726f6ce24..c100350d8160572bf857e33c43dcf749c931f065 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70e47cdfb154d003623d3bdf44b63c27947dd40a933d10b1e022f8be4080eb07 -size 444918 +oid sha256:5e6f16cf2123cad9cba5b37631b96af2db6ad6a509e4cafb29601d0c40c4383f +size 406614 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png index 58d2ab4e8e28acf8f7832a26813492ac612c3ce6..1e1ba810d02b5bcf05f57e420c7a48becb23d3d2 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87802f2803290f144c4ff1f533d2fc19a5b9f8f7d7b5dd4c8fad60a1181f1965 -size 443530 +oid sha256:bbe7def12e733dab3cf87fb112374947fe0342284c7b28963b54bb9461281ef4 +size 914305 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png index d5fed70186e998be9d3be54167d35fa11d7c7d55..8f7417f77367b886f6ab5cb145aee725799acfb4 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b951a966691431064bf06910a188e1b98fadc65d80cef11512752b8aa3f818db -size 1588979 +oid sha256:63f6a24d7e45a3a441fbbdf0e83666651cb8efb2b91f143fa6237885cf6fddf9 +size 1303179 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png index b7b2322e49379be5e29728826a36591e8b3d689a..96154cc861707d741d93711e287ef5840bf152ad 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9173fccf4d990f68a31c73c1f18cf6d1130f812f2900c834ebd51acf69e5ba94 -size 442462 +oid sha256:0e92e31b54e9befc0b88c58a5f4754e48e67f6b5480e780f2a97b63d7e220bc2 +size 597853 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png index 002d68d96dea3982134573699160cd1b7fd69224..e6020d8b9c1f882ae9f8476c8c199a4d8be38e96 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:816e00195909456f13ba4b6a429363df53ecf24e0ced4b6ca0775a121e553648 -size 444083 +oid sha256:4866cb7f7fa852e342ecf085033c11fd0218d6212c35eec5b762677f95ea486e +size 292101 diff --git 
a/images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png index a302638d9a570e2aab1449d5ce6e0c548cf895ed..f16ab3c4a57eb298e262f86230e44658e35f48a5 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a81ca5fc2c3e7c5b396a9a46869777982ac894f891affa8beeee6e3a29e35d76 -size 1253623 +oid sha256:6ce1f9f59a63321c5d500d4d098d496d1b5ebe0519a2b301b87c3e65df60c311 +size 1150728 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png index b63d091e8e77e3c29d5b8bba01def30657630713..4a4997f0eaec91de5ec81bd12791b4d3eb335b88 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbb0c89ea99dcda23ade006f2eea15b299980d4b7506357e00f8e0da680c601d -size 1658480 +oid sha256:e32e82ce68375d52ee0e256b7f9516430f7c34665a32ba179bec24b987d65459 +size 1391431 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png index 5d45b043e563e792b267ede8b3ca3e5389dbfeab..933086b6ddd39fca450a87e1f62de5f89437443e 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:222c9c09280cbb0d61e917e44f9433c7a5ffd922ded4add5081bc75927609ee0 -size 1670431 +oid sha256:9dfc594acdb961000f9a00f2b0164e9993eba97ea27ae8c9bc66359df63d013c +size 1045334 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png index d5fed70186e998be9d3be54167d35fa11d7c7d55..a5d6f9e2ecc807f625d670a8c73cc9bf04db7345 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b951a966691431064bf06910a188e1b98fadc65d80cef11512752b8aa3f818db -size 1588979 +oid sha256:2108d1afa9828df87853c801fbc1a69b208c9f867b6c0857f6d0c444097cf003 +size 1121865 diff --git a/images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png b/images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png index 8fdf6ffd0fc92b9bc752c50991a74d3df0346417..b464eb5950fd790db0ce21411e13f793a63761c9 100644 --- a/images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png +++ b/images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6b93ccc9251c967c9e6dc0fb03dcecb2c3b59cd2b90c8ee6c55b39b4f218cbf -size 1610677 +oid sha256:98a8320c50453023c4328ce51a8935a1d7f0f092dc3730dd3c53d17582c0b6b6 +size 1851995 diff --git 
a/images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png index 5a3d3962a4dad549a4783177390987b760eb3e49..1ed1fd71248d00e7d7154e7710dec9f8d552b3e6 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6242325823cd7be283b59bb1a9784e127a1a8386ef2b7f0c6dc3498927d4add -size 1483070 +oid sha256:0c9d087f5786aff4d5502bcf86074950c5a6f87338cec97b446183715e8b64da +size 1243587 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png index 41ff9857bb2cc1a43256333678df302e742b4603..84e8cd62ebad80e0b09a2b7a91a53a9d268fffba 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f56c953140bb7194bddb9de762d0bad974e075543b6f1621ff865fcafb51a271 -size 1496017 +oid sha256:80b7a684bf9f89351a5915cbdb2af2854a6e7172bfefe0a697b0d0fedaa6c4da +size 1283110 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png index 9e5cbd94ae4ed9c6dd219c39972a08be593e6da3..1000b14e699e34d687851d6b7577523494acbf83 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34a871daaa5200b1b4b6a3a86d2bc208b29dddda072b6ed2a26331d00a308487 -size 450260 +oid sha256:41dab786a7c7d78ed37d2ca1553aa6089a2eba22de3fe75b556b02ffbbae15d6 +size 520261 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png index d91e7b6904920c0f36d2d0936624b44760058b8b..3c7cf4c94e4a6b82101f8dbe36b61270ca431c57 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f72ad80f7aa50278d18a728fa313992bfa68f967a55953fe49ca7a5a8b040fd6 -size 1483260 +oid sha256:8c38a0a20519f3ee3fcab3c39d0b3e7d529aa099d7706a7954c4fa6bd7d7c550 +size 1528393 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png index bda18bfad9fa18e2a4280b57ab18d396460d2999..3afdbdc0f58de87ac0fb3c7eb4751ffc74ee5b88 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccd2d2f4daa9e8ae6cdb21efe9b68917200e65bea086748f16e163a16f9dade8 -size 1483169 +oid sha256:44245ab4d2460217fcd52427f73f9efea4edeebb024ddb7df2fb0c52205b4f57 +size 1299417 diff --git 
a/images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png index c5eb49ff76ff01d93fad7ceab7c2a4ae10ba6055..407439e5806ac684dceb5843c248e55a970c9416 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b484cc9e9e956f22820a2edfa8cb48a149a3801ae81ebf1d46c0d3ac1ae812a -size 1477238 +oid sha256:f833450682e7b1dd57ee187dd562749cbac6945a6c2722f18e2d309aac47dc80 +size 1475476 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png index f9a59c53a0ae1148074c8f316e3adc7c7123d57b..884522183c0d4c32113e03552e440ed9ff9761c0 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70927093acb53eda613b34bd49e6ab60e7331a483a9c62e3ee1c65e58e77e178 -size 1427085 +oid sha256:da5397895321d31475f4ef41a59c1e6443c406ab09bf8d8bcb493d998f534f61 +size 1463412 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png index 1613dc88266a92857353f55be2c2b33ab6956f08..6a07d62a51ab568f9ac1777bb440797a1392a6f2 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10194c9d6d40c3e1fd9d1e010b131b0fc142a24ab768fca332c69a7f70fb193a -size 1476250 +oid sha256:d9a1346a01f92c58ad1700b30eecf1d7e2204905be7342f2c1345e3389c00333 +size 1471993 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png index d8f6fe20fe5b697cf6be0d0c8f3d1528e4c1badc..a7c729d3d88340e77ef1fa26566ed007a8effb21 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:084e5fc6962ae24712daefff1de073bf690973df2cdf3e34d8d400ca59e2ccb5 -size 1484665 +oid sha256:d8e3c3fde286f7932568d46be22bd5b14bd261ecb3ae686d9398ad7d71a23dbe +size 1481203 diff --git a/images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png b/images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png index f8d7400140a12a5aba2b29793d49a24f1a730800..b2adc5284f20db1f3e084055084450826cf90924 100644 --- a/images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png +++ b/images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf0da814366fd9fe786e95b34b9a3a11c3364108c02de43caba4fdc6848ba242 -size 1483228 +oid sha256:51c2dd41066657c8cc279359e058bb6ad57e598bcc292be2dd2a242e965c8d7e +size 1485100 diff --git 
a/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png b/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png index 592b7ab439f70507f766246aa053f72417c6afb0..327ebf82f0b97f436e1a590453adc183d5014e37 100644 --- a/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png +++ b/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2a0b3ddf1c14eea80c0d302126883c5ba32e5f89c2c82522d08a1adf4195146 -size 1061405 +oid sha256:0e820511fd3f88927506377d58f409015ba928d06712c44b0d622fb4d7589e72 +size 640377 diff --git a/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png b/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png index 2f9aaff782dc74267835f46a4df490e2b6458ac6..36043c0021bd67937644043368bd70eb5cd70d34 100644 --- a/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png +++ b/images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bdd9ad09c0c63e9a7dad7372b59b1ca35afe97c17e6b5e00037003de4ea6fc3 -size 1107281 +oid sha256:827059a5ce0dbb546aa0b12bb6bddcc6c296c19730653edfb70bbc5e8e51ed19 +size 1336309 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png index e308df7b9afab7e8a652989b2156df0186ce1ef1..363beea49df3a047bb3f4196e64ac9bb797ac53d 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9089f8a4e2c2648ec721cbbeba8724c3b8af3db76bc996215d772dc63960098 -size 998128 +oid sha256:8a19188b03055363ef6521bb9e4c17e6a48ac75d313f9538b9beec0272ac3a2f +size 859665 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png index fcedfd3d9912b236aac1a6b5bd285b87b2761b39..f9b79ff4ee8c2f77095cbe36d194f8a3bfaaac74 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:356e7112d3cd937702dd0de4d802dc200922555cb2fb7586fe0bd0b94f7ac7ad -size 3793296 +oid sha256:47bf4c02bc78ef7aa31ec28887d0deb0d24f3bcd0ab031cb81a36271e0b10f23 +size 2598309 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png index fc8b7093ad70f439a0657cded183e93f5a527f3e..7aab8e239ae933694050853005b604bf9fcd72ae 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7546bf54b52f819a89c77c2dcda2cfea454c914078dd7e0db8563d23bef8a2b7 -size 1035147 +oid sha256:aa3042d017425b7b71aac3d18ecbaf77c573e2dbdda36cd50a8dd47e0a032ba8 +size 1033380 diff --git 
a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png index 381e62e22f027d6e843aa4b763bf7af912309db9..1ca53481d4abebf0666f62dbd3e002e265a4805e 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d97dd02827e5b23fc48bfaa492da54991d98e516ddfb5ed2b1d9e0ebdd95976 -size 1124597 +oid sha256:77e778f6ac9ccc04c7080344665305498cc8bd1a4d6a5dbdcd9da73b2826766e +size 801729 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png index b2dde593ae36a7dff1ad6bc1189d9454d34c4078..dd6a24abfbab8bf22d6b8978f09bf25747cc006a 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12fb681ad0dd42a6b22c4d9e7e043b3f403fff4c3f460e39f498fb4c7f69cc5f -size 1092638 +oid sha256:1c818420fc0c1d0c0f82ba6a11a07f6a9be3085365d4a0d5a280088b8200622d +size 1732912 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png index c7930442052978651cb9c8ecd5f0dffc99bc0cb5..595a1128eaf0ee475deee7a0704e6fa35f552f6a 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8de58cb8509a8daf95e046201dd65e0e10a117cf166808e1b5315f9b78826ba1 -size 997654 +oid sha256:eb9ad58a9104536331dd5adeb092e86029a148484e974e5201ccfb834c00b4b8 +size 1075645 diff --git a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png index 4ff273ff6c01fcae47fdb315b5a6453cdb730bc1..4daa213e458b4f09bd7773b5c7eee690a29fae55 100644 --- a/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png +++ b/images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ff24c6bf2587b5cac599ba5248ef3690cb519bd02032e6708a8f1bd7c44370f -size 1005474 +oid sha256:d5cc4d08ae539beed2c3552a11e611443791f1bc4149f0ffe4734da2ca3b4236 +size 1151883 diff --git a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png index fc40403c6b92b83f3146d44bc8ca314b10a48672..b43f265aee9a0dcf0770f0f251fcd0676c336f94 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:279cf0d372c56a793aec88770df3af3f8f1fda9792d976fd65d604ab6c47af5e -size 1148513 +oid sha256:d548fa38d53cabf8128683ac0bff0de169dd521a6a7555ec85f3d6685ad3b215 +size 1182908 diff --git 
a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png index c52ada5310456d694789d7a7819ea1b3f20a1bf7..7a11d03b46afd83782a5c009347d2fbf657fbafe 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cee921e711eedcf24b16ed58af1f8348b884404ead9a090f7ea04578abdefaaa -size 1747911 +oid sha256:57cf6c2e24a6989c5fac53e0a94754b26c07222de2859a82a753421747f09568 +size 1747782 diff --git a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png index fddcae17fa56144c6cd8ca7e6bba8c8f0c26aa5c..d8a909b050af7381df4a3f750bd61be72d1fa40f 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bc8a8a50367e2d09e90e7dc6444d503000ca4429d7437701ddc4ade4ec6e694 -size 572441 +oid sha256:db6d5bbffb1cabf98a9882be0edbff01a6341755921c1b94e01978a2346603bf +size 653581 diff --git a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png index 5d3306414ab895532b4760bc2efc67668d41ddb8..00d87d4166d8643dd19afc59e5d7d0fccdb400bd 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd193f6a005b05ee08a5f6286c322925af5cbb67c63d7a221ee0ce12e260669e -size 1639682 +oid sha256:77ee03254962408d344ca5803406907e485dfff10399a3cbc7e80695fd45dd73 +size 1658290 diff --git a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png index 2da242a905d2f98adeaa5cfa21e39e67fe196d24..1af24899ad0c70ee9045cd211ce94759813cdf65 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e864fb4e6f50dd3f83b7253dcef60446a38c6b137b7587bb05170cbe59a3dd7 -size 548858 +oid sha256:94d8b5bb5566a5d7bbf9d89271f3cc4e3844631c5f07fb2443045abe05b0d830 +size 629973 diff --git a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png index 48000c10f7038ec098c6ad1c0200258ed1974c7c..2bbfe3edadde37d1ac65cce75182f0994b6f1c91 100644 --- a/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png +++ b/images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9fcf3b0471e9dcaabc1b222af24a62d1f98942c39eaa409f1c4024487093e40 -size 595465 +oid sha256:f16f00a2e880ada8274904fdb87e9b2967c4513cf21b759af0eda364b97e3c67 +size 632126 diff --git 
a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png index 54b312b612dd29f3e1cab1f3e6a05567617e537b..d03f62dd799bd185fdc211a502614279c3c12295 100644 --- a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png +++ b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05064122d47c196da20b3514cacc2d68b90764e59d1ec3c8f1e90bc7bfb2bf5d -size 1092063 +oid sha256:72d3ae260b37e262cefcbf32b10e0630db6d68d7783e2afb790a681476456b33 +size 1146974 diff --git a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png index bee505f85c06e9a5d8991071365d2ddf3871038c..7ea88d79fcebc59fbe3e47baeba4e0444991f39c 100644 --- a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png +++ b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:852b56878039354923bbf3142e80def905c8981af4ea512e26e26ac6aaecd241 -size 787801 +oid sha256:1299b9bb04413e2c2816cf940d40e9e67acdaac2e9793b9f9835c23cea80717b +size 420478 diff --git a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png index 741cda370393479255da4c23507780330ab5dd18..0ac55b064bf3b634886572c81486ce7df12fa2f2 100644 --- a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png +++ b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:465e994ebaeb1fae43e941455a00dc057fefa6a6abc3c26cc2037ba6d6889292 -size 913667 +oid sha256:54fc40cc6b17398df3e36cc6b6eade892ac5d05cb3cbe02aee96fae09ea9db3c +size 820074 diff --git a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png index 56f579338edfb7fbc6dc70357e64c262f280cc46..797dded8979aa6600a8b38039411ba5f5f9f3c83 100644 --- a/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png +++ b/images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c2fa64c05eba92f1556fc095279758fd7c1ee74983e7f4bab14dfc7dc3feeaa -size 1063970 +oid sha256:03ec88528b05ff8927d8d37c755ddfe8b5bbe2bb7de7e8073ddd47e76ce6543f +size 1219323 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png index ccf239f22157dfd2e54b8484cf2242165bf3aafe..c4639545475bf09191b85bddc31982e432868293 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e6bc450791886d4bd43e25dcb959b20f46a764fdde08201662947808da93b36 -size 351022 +oid sha256:b5175ffbafd289cca32aada846906a56454974b7cbbc75752728b942486c115c +size 408058 diff --git 
a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png index ecb8bcc4e31d8958f06ac83fc8e948c4c0c0741c..80d2d0e0125d0383854c3e4caf8104f15de27f8e 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57163b277b86d5be43c3076e5ae79f36d05953d7032b4d5dce3f13bdf6b58342 -size 351006 +oid sha256:ce8592da0ec2451fc95c0956ddc9c31c60f33250d31cb02dd113699335fd8ec4 +size 358291 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png index d406565e1ea222d957d0dee2e19edfee4f3daa9e..612a089808e083e55b90a9e41690fd982b104db5 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ce678ee63f728627d1beee187958fbaf948c9f1ffafdcbeed30ffde038b6494 -size 426441 +oid sha256:295cfcf2ad4e22dc9b4555259cf65661f9649ce3f127847bb14bf097c3d1b2d6 +size 369178 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png index 74b225699e6db6b4a352610b6f0a78cdadedff42..adf776664586a7bc67c705f1b7ac69390599a272 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe3113f8cbf20052d8fb5e05bbbb2d8e5033fda461473140ca87f9961233af81 -size 624082 +oid sha256:8aa956079a811341c79650a1b14f4c334291f8d11a280079b96a020a16db28cb +size 290458 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png index 99d4ef8939b3721f816cd6ed7d85e4af3de24fee..a8f3ebdd270f08d8482a3546d221868bc604c975 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d713f6286b60d2f05ab3ae4560e3c29b61a282ef73cdfc94f86f3bc1ccc03a37 -size 630577 +oid sha256:c4e82d88f9c76dae2c3a4e43af8f6efb6270965c82dd5c3513164ca974302cb2 +size 523750 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png index 4a0f4bd0d3d58b99414a0ffaeaed2b7ea39d615b..88356908e66d3e2bade9b883979870be5a33224a 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc712acd5753d7365db6b662b5c67da7f1054adf87d944b30288740ee80dfe67 -size 1453861 +oid sha256:5f3cf53aae9f4379eb75366b0159f909ffec902cb19257267f9fb7c8024a52e1 +size 1575446 diff --git 
a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png index f2f78e5c3c7d42217d1773f7b159ca71e09dff6d..28b381e04b9ca1ee8de075c4569bff04eba07f68 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90a2909922c4d6d1e8e022727351e18270c3571e4d2d4cbf326f25a8df2df319 -size 473736 +oid sha256:52f66a21318aefc2e639566859e1b49c3a43990741250a5d18c41571c0d196bb +size 472068 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png index bfe36586fddc502cdf7850e9c12d1c0ceefa979f..b2df1f6c9a25d5395e65a6bcf23d26cd6210da11 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8561e12cc5e21963e4e65e8455b9cac52d20a90b82ed07fc00ee0ab4dbd7f597 -size 428556 +oid sha256:b7ece22604f153b7f43f846b9dbdd8170bd5bd5a6884d03c6bbf5734060a6c45 +size 244765 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png index cbcabf42b82f999b72409f1e2605e443d0d065b6..a12526263490c7cd8b14896fb68d0e442c750878 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c995d630f28461fa8a8bf5ab63263123966fb23bf51e68126424a647998b152f -size 356121 +oid sha256:5a54d44679d1aa8af2da21e61ee145257bed894351463d392c1fb6772bf9047f +size 202359 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png index 1dd5003d106631d8db20ced6d342a4290bed41e8..54a78af8e966e11e26aa786dc23afcb68c25cbe2 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8e7ee39be28f795dd7214e02352b2cb2a67f6630685f39c3ebccb3b09a9d89f -size 381360 +oid sha256:fd1036882220b32c14a2007a128f8f3c10ff44011459f82117ccf24dc41d0c65 +size 321253 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png index af01b3583a23f93d64383f03d81e53df41cc80a0..33dbc38e019950e654deee35825ed662bc242241 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83165e7d53219cc65e0cc45ec456b1177450b288e2754203633a2b1a800adef9 -size 620380 +oid sha256:a2a8e88fd90c99c45771c728ecef7c28dce9ba5168b5ba0f5471cd20fa0b6120 +size 335574 diff --git 
a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png index b4fcdf5f78888bf20256ff9e5595dad4cb8c2002..f403e09203facea22bbc80c3f4ff9d2e69c0a2a3 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce6f57db037ac94db8bfaef14d125eb51518fd7550933a9203ee0060329378b1 -size 1446887 +oid sha256:b41cd3a3298b8209d77c81891eb17edc18c0279773d59105e47366236f243c05 +size 1649715 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png index 2bba0ff044fe5129d9284f7b09b88b58da220dcf..143cf8fb90e1dd613e558c32e73b439af59011ff 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eef771aa70b6a5d870b4d69723d77c8e3c23dcd940bfc94a53de941e76a75af0 -size 597918 +oid sha256:eb21c0eabac2cb3ab5affbdb6dc33594df98146ea7170c70cc9dfa758d1f0d85 +size 314849 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png index 9e17d3c7312c4cde291e2f64804b2f4dd56f1cb1..3c57ddbfa6c1a7b8165fe34a5707bf4f5940fbae 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:053aec1becb662a0e5d8a18a6c73770aa7ee817d08a86daff2bd6c99f8e764d4 -size 264161 +oid sha256:bfd6f03321876325d0770373525318f12c706a50e0f909da23200d75a2b040a1 +size 371684 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png index cd8fb82e4fd2ab008527629cdd607bcf1634f9a7..8dca718234bc8b20f94da63a9b1aedc5536a0c52 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4022a438bdd80e8640848d37382650dd383d7290a46193d1dd1bc13864b5c6b3 -size 348716 +oid sha256:3f2045ccf7f3b32636ae454c17fc5fe0fc89158c102a5d45f899463e597a7d07 +size 270807 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png index 5dfefc15ce4fde8e9db9bc2de104719d5474f81e..f530071fb957d26744ba286f58ae9473b97021c5 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c41a861fa837744660f3fe9a76403d6ccdeb6516545ad314f71afbee43f4cef -size 633703 +oid sha256:493ab614e7a116f37ecee5b3442135b2034cb5018dbbfacd0a927582b2586c40 +size 418325 diff --git 
a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png index 94aed5db75f4639034aef8ed9dcfe429609bde3c..e19052fff39021168693c19302577ba57eea0165 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15f3b9bbfe2a4b05fc27d6869bc84e071b9ae61f2e5da46175f6e46ea20fcbdd -size 1446750 +oid sha256:d114111bd7928a450155629954f4ba5e5f28f07f75084231dcb2cd2840b9b040 +size 1488051 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png index 933c2e4293992136fb288ecb5b7af0fff0a7788b..0a40049a21af5400d1b8c2e31d185bb7f32b17e6 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58c0397892ad3e18e05e472964d2db55ca5d48635e1fe2d340654b8234d61886 -size 620186 +oid sha256:7b6b632eb4d3adb7b07bb1212ddd2831264b4da0c3b595650daf2416a645a384 +size 287913 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png index da3fbbb7371d5a4677ae9ba41aaa2d539a15ff78..a8e9b9ce1453db3f5f52e55d516fa3d0287b35cc 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a747ccb3bd01abaabcf744885ed33fbe29e957b405ba0808f9b96a7df037617e -size 626367 +oid sha256:f542f7d094a7358d09399f863b3445546af5c7f74284c1ca75bdcdde7082a2da +size 233147 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png index deeba450ab3ffed74c8056be08d0ba91285c82c3..b4107088c9106b7b1ec073aa9763a414c2f7acd1 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7adbae6aa7e65bfaa0f85b3bc7a873f7d44b69980c342f1d377f029df907ae7f -size 372975 +oid sha256:074018c32a46db5d5abc4ce7dea6623fa1ca0d972fe68d26d9fb13348316f561 +size 433218 diff --git a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png index ccf239f22157dfd2e54b8484cf2242165bf3aafe..bac68c9e6dc8a024f7eb5c66b0cadeed9b6734f5 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e6bc450791886d4bd43e25dcb959b20f46a764fdde08201662947808da93b36 -size 351022 +oid sha256:8f5d9529a55f9290df8b3a2c885ea8c37e527834b90095f3a422a7cc93d543d1 +size 292591 diff --git 
a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png index ad136f71f28867190f6e974ab905f12d38204637..fc3b444284319e83e503d0653ede1553ef900be6 100644 --- a/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png +++ b/images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:971b186ab968c564269c0d8e9630b2b86951f4621bc9b67d99da7644a0005959 -size 468196 +oid sha256:d863de18f87322945b852f773b45272e516890c82676df0f350a9c631ef0baaa +size 289341 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png index 054f08d62d848d34eb61370f8a94308c824f1dec..278fc8412debf6e314a14dd5362f0e8c98a33e1b 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9848153e6f27c151b5ad43c2f29c8d4906663055af0203093c02b60b3b3921e5 -size 1095885 +oid sha256:5e271665adf137bf882f144e66c4b1d2ddcbef303bcae69dbeb148f21845a015 +size 1347050 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png index 16431b74c67cddc3c878d734d8183a97adbaa637..d16c6685cc4b763e7baad82cdf105276f2ce38be 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19009e8d84b8411e3e49be385bc81d25e3149dc68afd8585cd0ce9ee67508d94 -size 960346 +oid sha256:baa85a77ae8c1ad592ba4b2d109cf47cb891500450dd61c06210ce0cb868436d +size 955812 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png index 4a07a659b54b03a938ff964b3e4c22d3b4a058ae..213d733ad8be803e66b710988efb0ee9213a3e63 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f4562e043ec84ae1d3b744900761957d1341c5339d963a578ffc2b59e8b568b -size 975392 +oid sha256:6f1dabe6537c496f4c3e9cf48ad192ea0cbcd5a49297d96edb66faf95cd5c808 +size 1272703 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png index 7da59f38fa8eacc85743c1be12b165af2a1eb0bc..1864bf19bdbfe2624744f5a6002965e9b8529de4 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:640865f6458306c297f581d384fd15f103d0eefdf771175543c01be0320699a0 -size 1083087 +oid sha256:1f2323e4da8f51cd87270b0f5df2d6954cb2377333467c9e4aa4e9ea706bc222 +size 1098475 diff --git 
a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png index 4a07a659b54b03a938ff964b3e4c22d3b4a058ae..bed1d8ce21d8c96414009c7e6921d230ea6244c5 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f4562e043ec84ae1d3b744900761957d1341c5339d963a578ffc2b59e8b568b -size 975392 +oid sha256:3e7e9ca8c44ac2829b88a08bf5af44e5a919fda32d7daadc939313584fc43db3 +size 1011836 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png index a0db10e4f4607c582809ef404b19d49a920d1b69..33e2569d85e04f50226fa710387438bcc891c880 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adbf3183a585a4425ecd90d014b3e7ab082969458508d5b099b35322680c2d49 -size 929481 +oid sha256:a79a09f09b36fb24b118c7529af6dafcf2920a3d719ba225dae809570acd71cc +size 1004607 diff --git a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png index de135acc0cc839fc2575a0d746ded7c858004a39..a5c0e95921c3035b8b57ce0a37bdab9c4f074c3a 100644 --- a/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png +++ b/images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:974cd14c83b25778c65fbb13962eaa8f82f50ea3ea126bc295477b1bc1d4a1bb -size 980816 +oid sha256:e85c2a2d11ea9cea8e0b72d41af08b87e439e6ae85f66e339371b29f63eb0be0 +size 1337958 diff --git a/images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png b/images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png index c9875f1d78d5dad47991f1fbb25ea1942a4cf622..181d882050a86e84844c807a9bf2959f003ca44e 100644 --- a/images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png +++ b/images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddcefaac208ea9cd3569a912a45d48a815df320455a389cf6cc3caa16efb9041 -size 1129898 +oid sha256:7b72ffc3a6f132bd743d7716b2411bcddd08ed53631c84643dc0c05b526b03d3 +size 1250869 diff --git a/images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png b/images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png index 377aa8f1aa739ce861d305c004d835e14c514a1f..7e081b63508aeb9dd5fd5318a16d7ed56ac12320 100644 --- a/images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png +++ b/images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33897a9c2d18f9d2d1e14fb6b153930ecf2d9db0f4e277bd77463d03a26a122b -size 1015643 +oid sha256:a2c6f674de97363a0571b043f026b5a18922b1e15741298ea03602957657c52b +size 997522 diff --git 
a/images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png b/images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png index d71132f644cf88b8b048b2694bdbc04e726af7bf..f12862147e52b7bd67bfd919cf17465d9d6768ff 100644 --- a/images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png +++ b/images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae95d28bdf0c92da602d371a082aeb221070bd2afade1d8ff9a68161c651c085 -size 767641 +oid sha256:7fc5ce04f9e35edbba5c8b9ab1420170f0c3f2fcbd87169bb6c531aa9d25314e +size 884042 diff --git a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png index 4f2c46c490778a1ef9920174d235afe4bcd5d464..63eb045bd47499202c47f3e022974535ec70214b 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8144fd29251e29d2438a3aac3d7127430f1c3bfcda3a9b2aff197f0f5a6a6f6 -size 971429 +oid sha256:d6898b8c4ec54eca992fddc78919e5af354aa05a580dd1730ccd2f7546d92316 +size 856311 diff --git a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png index 32e25e81bef3dcb4961fc903863a733611184ebd..43530e6fdf85bbef2bf8c8bc34f8b138355e73b0 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d63ce8194f445f6a0133a2377b3470856214cf03269a3750ca365153ae7536f2 -size 838107 +oid sha256:50de0100dd6e064875483daf6edc3639e4293f330c36e6e938bda3814f8f3fa0 +size 503291 diff --git a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png index 16c52ba88b003ef61b4582e6c6c9aa77b04ef71f..1517c07bb88e0d6499e7116b0aa5259ed2ea8b5e 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31c80dace2769326e839c9847e7728bc3ee9c1901ab9bdd9287c046e9e262323 -size 605386 +oid sha256:ab864e91dcc0af93223a80f0abd9de99f47d6cbc95d00f7fe3ad6702fc702386 +size 922471 diff --git a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png index 1574adade5a9951953c17435606861c4580b2ec9..28bf85e3d317db8513a021f0ed69352eacc149d3 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfd1d85f65ab489fc0e96950a1045cf8011d88abc710ea0ddf338be43d01bb76 -size 1142649 +oid sha256:9507914d96e1e9c93dcd4afca122fb16194b16229a5ebba579b1e35c85ddca0b +size 1478511 diff --git 
a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png index 3452079c1612f08594ac9e4113ba63dd1a915578..1db4e40ea3723d2ac57f03c669963f4bba348957 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89a7334811267152505f554624977deb9731eeb7e8d025862b716e842bad3e76 -size 646494 +oid sha256:8961e81d47881b6e757a7e45fa10e3425719c9283e3351eebc71ebff6f940a96 +size 626060 diff --git a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png index 501178419611f8109891c9f650a105e81dabebeb..e845e79b7caeed17e79c4d02493ba5465cf8afcd 100644 --- a/images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png +++ b/images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c92b601cce82ae38db2df0f870b3f81bbdb3925d8b1808bc97015908e9841de -size 690512 +oid sha256:b4e89de482c8e463c26706a223c1578ec57d768acb48178988feafebe5355e83 +size 956456 diff --git a/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png b/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png index 41473cddf4ad91926ce583c416879e265bad8ca8..1a3ed4ff36e4b7aa61b350d36b2ed45c04a3aaab 100644 --- a/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png +++ b/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d1531c39d64fffb67c823ae751013d0b8dba631a9a4e401897ba1fde5e0f2c5 -size 1205488 +oid sha256:747ddada6be7fd1ab48ad98d000fe776153c1d83cd609db62e771ca4e5169446 +size 1702082 diff --git a/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png b/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png index b8aca89394a9a4aaab203f981e2cd0ad9b20405d..9b02078dc0f283e351683fd138c4e10a62424611 100644 --- a/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png +++ b/images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:032f5a9e6d850934563578296a15559235dbc4c09f6239a2bd1a982e7c4b9f68 -size 1227677 +oid sha256:f17978508e004633e1f8088569db2f39a5e2077a286b0a59bb0d8a93001bb3c7 +size 1360604 diff --git a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png index 436dd1eb1519a81cd74c996d74786dded28896df..436c6defb9199d68dc70aaae4094dbdaf26d3346 100644 --- a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png +++ b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:578166df3528508c00d164bf0d013d7bfb745844eff780efc6dbb157aa5516f0 -size 1043369 +oid sha256:754ff57257d5bf5095d1ef84c5c3b25b0e2cf5381bbd8fbeba8e9fbaf4271ce2 +size 1105637 diff --git 
a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png index 66850b02961dd39a7bca166dd677cbdc2e26abd3..f4207ef3959d13d1787e918ba588ff574e2e1bc9 100644 --- a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png +++ b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00708b653db885cd739a1e77fc961a2a86d92ffbe9b34e955db689fcc3eb727d -size 1734674 +oid sha256:0a0a34b04a05943a19a799cbe8ecd33b25d87a3fad66685c753f7b8bee867d7c +size 940687 diff --git a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png index 628316c82bf58a6e1cdffd0e633969c3f1a3138f..44680502c38a21a326f31f32fff10cd6bb56fca4 100644 --- a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png +++ b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe93e9f08773cb386a15f21a3d502940379fde057b240d41f212fb3359507c0b -size 26504 +oid sha256:ec90d06ec9c46106363150b51bec88726ec2d5574ee0153dde579278d526c57c +size 55622 diff --git a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png index d974b5197b4a60afc00f9d922faa070a6032bb6c..551df218b0aeede33025225a70af4860f5900a41 100644 --- a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png +++ b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e502be2d0cdaa2910a0abe38ae3fb86722d9b20e085682f93d11a1632f2c8778 -size 1349406 +oid sha256:fe7cc4f346922f8522927fbb065185c2408b1723b486c6f005fa36b72f048737 +size 1821501 diff --git a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png index 65e12bee5f202145fa17d872236b67fd1f24dad4..a88bdbcc425678fab664523a0f4d482988b280f1 100644 --- a/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png +++ b/images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2cef942a29c5095550ea3e87fa45aac92a02238e781389dea26dfedada1e66c -size 1871177 +oid sha256:dbc589df1ee25aa256cbe067f4c48e32b6120e82463db8a945f3e4ede97f8fd0 +size 991516 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png index 692f83b69e78610a76874b2382c9eced1b74ccf9..e693a7bb5d80d4585ee0b46edb408d29d404635e 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39f67e808bb70a22b4ceba4d305c955f39af11dc1e3714f2dfcb8521b5808573 -size 1410509 +oid sha256:cc38c29e4bd4ee772a7734cdfcab793c277ef060ad3a2a34d979aeadadece1c0 +size 1323628 diff --git 
a/images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png index bf4f463b6c178fae20462bbec8dd8599aeb45e82..67e48712e0d1604dfa5f990326c7fe0dff9207b4 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06472f4adcba1ca14d528658f70ebe3607434f05910c8db62b7cb134374bc434 -size 1031094 +oid sha256:ec5c37219b4a3c003aeb7969da410c0f1fdebd52364394a75a91cef9cb4ee92b +size 1340730 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png index ebd40f1c7434ea7d9b1ec46261df049fa77bff44..69c8841886ffb2d00e5dbd12b737c53dccbf5d46 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:142ffa98cca36c5e578307d40e38c2dc4709c1e1018be8ab540f1c5a4df773f1 -size 1622189 +oid sha256:8d3836b017e26ab2e32a8e8e445b1cad17a159cc714428350d987b47153e240d +size 1107514 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png index f7c33372b25b5d61c5967ff39e172ea8fe2bd72c..687b90cbc216a0fe267968387413d620c34fc195 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:769589fc922635a95a5c1acb295ff9e21fb7ec80cde736447b675c1a6b4d67c1 -size 881165 +oid sha256:383bf82d71a9a7247942c1128805b5ba9d1b3939771afa93adc78fee2d2162cd +size 1151904 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png index cbe3fd97babd8b9af3c045f5b33408b40b82b993..0cd31c9170e9c99aa41abf2f6e68d1ed0336c6a3 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20cb53550176239e24db38acffb6d236fed212dbf4301d038a987eb046fde08f -size 1109525 +oid sha256:1a5cb5af146a8e3ac887747725cffc7064d051b382e1fe7748f3f6cfc547fe9d +size 1684944 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png index 368d304860924cdcd8b165bdc2ea7fd0866042be..7a8dd12d900223ff4e62c736f81e5727b80aed62 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f13475fc68b79610573c8b371e91386ea80876107b380ef59155f423c1a08b07 -size 1468741 +oid sha256:9a603116a3987915f8d45e7ca868e9e6956f24e84c576d8913e5f8a3a06a44f6 +size 1555308 diff --git 
a/images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png index 662aacdd609cb36b386f103cf9172209d767f945..00b8058fe0bf53a5b635552837c518a76e150881 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c9c301cefdacc60115b4b530622e86c3b8565d35819d0c7dc7466903e0917b3 -size 1379341 +oid sha256:dc7db430d5a09819f3fc7bfb6b3853b653c52a43f82f0da0fe8c2c683cc44ed6 +size 1614309 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png index 95bc40bbddccc260baf6bd7adf9b8d5533192d46..7a7deb31d3d0a6ad9a85c5f7e73f30fe9f05feb1 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a1ce47b9db35224c6fb45942a66a35383be027a3d2a0ae22f1b5bb9ebac4a30 -size 881777 +oid sha256:1088f94a74ec6bdfd97df1c8445922248c0b6382fadf72f5effe569ce9bb5643 +size 949450 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png index 19b60c0ca6e69cff1f3d3be253d05098481c9239..ad24b28b3cd3670f518261c7369c6551059d8cc6 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b9a6b9528ce93f111fc8d04b65ad718e37f83ba5adc8fe9ffca4b580687d99b -size 885864 +oid sha256:8f58d14a7953ca55fa7f1575be42e490f1946ab5f4f7f9c4860ba1b6b1a6a78d +size 1964094 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png index 9b496b9e23bbf6c5f013719f2e0b8bf28321111d..efbe37e9c195122931bff554e0d735fe21a6a10f 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b99adfd9fa4a7324c973c0c8a6582700df21c279ed30fab5a7e159cdeab7827 -size 1652323 +oid sha256:6b8fd9d18e5e339bb4c274398470d1a983dc907fcf79080d85f0175b5dc0c3a3 +size 1630426 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png index e61b787d286ff3b88a0c710e7f4c8a4327bdde62..02abe223533147eaf0e88876fa74584906bfedf1 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52971187de54a12b7aa0dc6809763c635fe6c5b6607bd2c65071bc96a642a9cb -size 1531438 +oid sha256:56b213cd1e6e96eff2fe3937d8832a6aee4c4cf1550e9a5c311bdfa360f86dba +size 1886602 diff --git 
a/images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png index d321984e49918af4bb57a7b8a4e5fdcf5b1321da..fcb34fe8fd53060e7a88acf8219ad4f7a5a1f9df 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:975f8d3ad9a85baf163c327af53f78868bd057b8799acf70607c407585f96285 -size 877789 +oid sha256:ee58477f5e4c4362c2398e55e8f4099e87dc63b34757080eca1a9e83b38990ab +size 1730945 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png index 415d229c3d83cdfda5ff1e1e4e8f926f232c5539..b99993b380d6f9445f3f9791c8067b01fd48afd6 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a095d704e62e7631b3f89f9874d221aaec002ff5d2f97af5d14be3c06006440 -size 1304022 +oid sha256:6d0102ff6b2a42631a142f8876712d0e6a0373e01dbfa221a57e25de63a1f8b4 +size 1264973 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png index 543fcdb6c2079e50dd493969ad539f6da33d17f5..90efb7d3934179870a5e9178d5519be49aa23aa5 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa9b87f248edc42ec98d284bb479d9c30f12c88a96d30435b119c770925cc139 -size 878070 +oid sha256:0d77e654aa8a04267ae7e389965ebb69ba4d32690e1bc6c8d1813f641b2fcbc9 +size 1677450 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png index 53b3145eef981da46b07354a1730475fbb85ebed..92a9df147d19195c22919d3569ee1cd7320c7a7b 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:018788e0bc7dfa981c7e4d25440c99963089758dc07dea6e71d10862342b18bf -size 1060142 +oid sha256:c727f2b872f44065ed8d4fa8f203b6d8c842cdc126dcca235283086250d38165 +size 1497165 diff --git a/images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png b/images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png index 251e9504b517b15a0e7aa3b613ea4239be0a8a4d..3335867c9a95bfda1bcdef20ab17750de782af17 100644 --- a/images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png +++ b/images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:773f9df1c4b912ce3003739478e3c6a709406223224cd6405857d27af94bcb09 -size 1391274 +oid sha256:a9f4a60bee13ba887f7443e108871b9ec8cd801899c883e8e70384767b6a9b7e +size 1530631 diff --git 
a/images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png b/images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png index 625221831c794a9d89569d17c50cc00e22f787b5..3f8bc669d9d645d90acad423f12da6113f17c8ee 100644 --- a/images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png +++ b/images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3270d43ca1081b7178ca72161d4e701b1505f2a0999bee3cf12d453d92cd4d46 -size 2747420 +oid sha256:152d182e90d4a7ba4abaede6e4b9c70f5f925a2355853e4572a231188f422d72 +size 1951215 diff --git a/images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png b/images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png index e4292345571881c618d6539f3380ae4ae8b770b9..488a428124af4dd9c2f8e2b1f48848bd4535952d 100644 --- a/images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png +++ b/images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b80806f187aa4e0d9106e3663c312e0f90b0983599c9855f9da04369d981f4c6 -size 2670028 +oid sha256:b5fc95fa7fa322efa7d206535c746ae99c8be29b7f1b62655c03b2597438baea +size 1824325 diff --git a/images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png b/images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png index 819ae2699f4d18c921eaa99e151ad37c66175a1f..2ed17e4e1e5fb7ed4e01e0419dfa705385063d8a 100644 --- a/images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png +++ b/images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e5c1b5aa75eb2adcd005aae43bf8a989338bc1dcda706e8d1b229ea284ccf77 -size 1332270 +oid sha256:00fadf0da3d366190c52bceba5291760e871ce84e3d6ef7265d735bc905fc219 +size 1268403 diff --git a/images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png b/images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png index 81ef4a70d750852ab20c7328d3253ad23d7ff5b3..b924dab893d8e029538ac72299e715db97672c81 100644 --- a/images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png +++ b/images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8d1165e25be1940ef164787ca1dcd208003e211fa73e102e1114750cdf05628 -size 2016521 +oid sha256:d7ae05d70545deb11ee2b6acdecfc4dc53861aa2f4b3cfd809a82fc0da808d33 +size 1725398 diff --git a/images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png b/images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png index 247641d917752de785309a49b4fe1a601ceb5c44..6952a42916bce589aa971876225e8005749014d7 100644 --- a/images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png +++ b/images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96aac7c019f9858d05fa0a99ff543b79adadff3e4ee1778e04ed09ab445e86cf -size 2057612 +oid sha256:316ff15c750a540cfb454c0b4bd058307a752cb51ac2f1fead100bcab347d4a9 +size 1263961 diff --git 
a/images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png b/images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png index b3dab4bdacf15e04999295e79b09fd124e0150a1..560bd1363b2e0de573ec0c523f1f20eff0f7e457 100644 --- a/images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png +++ b/images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c780eb546af0c8b2fefa5e9ec54a3e183bed823b4e14353af76fd48a17582448 -size 1410549 +oid sha256:6a35e421c32891e3efbc3407244742f60351b8aa168148c9fc497e263547b764 +size 1116292 diff --git a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png index 524f87704bb8cdbfb6965cefe1bc4c34827e1c19..fcbad1ad402b4df31085397ee8325d1b59973b54 100644 --- a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png +++ b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e92f50937cfaa51f703bac417975d563b4af93711e6b6186b4f6a036c5b3840c -size 1490964 +oid sha256:c4f614bafa806ea8dd400009ec5f0b8bced3e19dc9cc8c2e3e15b933cf3e22f0 +size 1539857 diff --git a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png index 5468f06ebfaa9fccbc058835ff87041376d0085a..8e8f61f1d5f7e2ceb45ee47f23874679e5ec59a6 100644 --- a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png +++ b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:308b1bbdb316e0ba2adea1036039cd2d39a21ea1da56ff6c7c7338e4d94f982e -size 1205529 +oid sha256:bcf9c363b4962dc42d67f69c1cf93fd21b4def8960aaad305e7d35de906a3cec +size 1236721 diff --git a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png index b7b9989bef507c0ad554fc8f943812bf23b4975e..7d74b0dfbf44c690de51bb2f14b7c601c20cc666 100644 --- a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png +++ b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68fa40122449189dc8568e2522db402d938866507ee75d529ca281b62f816200 -size 1053350 +oid sha256:aa866192abaf6213c4b4fa8d0ae2d03b9f8b6a53c9d5f14309a5aed94934337b +size 1083524 diff --git a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png index 1ed64acb49238a32e53465d080b782de359d3008..be78c2a517e161dc57415cb4b140c9dd09e6364b 100644 --- a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png +++ b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fd843601ce1b304b6377be19cac3fd7f4574f0c9a06967806b3fb7fcac40ca4 -size 1047462 +oid sha256:6951ff2af45d3661b89878e7f1e8f5ccd1ac16f36b0a602bb7f4815305b9691c +size 1038302 diff --git 
a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png index d093bd55ef1bf3163479fca9fb3641071cc06aa4..7baf9b59b8beb497dccc254b99ddaebdbd33a492 100644 --- a/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png +++ b/images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:001610e0bbf6026d12ba88c32da61f0f9ce0444e6f02e35119375e1a46052abd -size 1896374 +oid sha256:35edf5c302a2497cf33ee0cf595fea9fd976977f25c34c513403a80e41f77f51 +size 1526955 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png index 9cffce045b060379b57a61bfb9b8672a1d9fab80..8b81b5e3e3fbaf0009ab7a22ea3143bdb8b1b0c4 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:317d9ddad3b542cab42c4bbf79eb2d685986a062831f8b6d69a8e5c82188a119 -size 601584 +oid sha256:6d4f00e074a4761ac47c537795e5a3141b3970122492844fea18dfadf75d9003 +size 661471 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png index 85beec8f8ffc82997ecf22d843545a09f7cbdb93..b0080e2f5209cb0324b2564d06b60adb01b9ad92 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:870484f41816d466a7dad6bbdc2c0aff6311ea114dc43f2c06e20d8c4e4cf96c -size 587177 +oid sha256:654e0ea314ee124146849c3c7a2f0ddc39eeb2baf95530a2091484da220a55fc +size 381391 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png index d01915b0262e8317249a3a205c63df260912a283..b4fe03ad3454e8d8041bd03b1dba6a1ed668e9b0 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:861b231621fb00f56f9a2941e43db4ee1de4ffdab6428b6d032d0b69570672a0 -size 121025 +oid sha256:a6002645b7bdcc3e733a611d9cfdd77169e1b59576385884af8bbfa9d6d2d456 +size 147451 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png index 0719e42a1cf286a3eb3294409fb3364e7154ffbd..e737da90bd429891c20dd93493a39bb98a72ddc0 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90d4c3b3fcdba2980fcfb223291ad70a33366a4904d65db113075f09118aa964 -size 1736822 +oid sha256:ef3c8edf48ae52355e8821062a17d371076579965615daa8f3fe9aabb7201d62 +size 1264411 diff --git 
a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png index 1acda6f6f61ec420429fba134194b45a6856123e..694c5ed384f1a354f5d5f71c7108c573f68e5ba4 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9776ed4452c3155fe2c0fe2519d295e1942154d95d6bfed13a2a430dc07697e -size 1187985 +oid sha256:2c07867b015b5e21bb420151aefcc559042529c3a48e624839d8d2af36f11207 +size 1137214 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png index 2b470bee01382a692b6fb1ce487948ef4cae2f19..ad4920b0085631f8eb917ce92d9229e87d7c53e2 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d14d852cada460b091451518242df2dfa4a4febfc1c62e4b4ece2ca6e2e888e5 -size 587543 +oid sha256:8cedbb037f797477e8f6437a25f7f04677f34a403335350a3b2330c7b57df5c0 +size 642823 diff --git a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png index a196020771ef98bb390e7d3116c17913f63eea27..4922afb6e384a3a3328e94be5a10ce18bd71fa20 100644 --- a/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png +++ b/images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54caf373d6f1736e88bd1183469555d9cb2908277797cd6dc3737ea618537a8f -size 587033 +oid sha256:9a334cfb550f31d1009702f0f360e22f0beb22e4d97984d28f6d04bc2e699df9 +size 588202 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png index 943a8fa1ba16abf6258f3a5e0a855930d78dadd4..5ee34126ade58cc388ed759ecb97ffc7eb4aa60f 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a2953aaf5395bebc8b7912860b47ea1cb18bcf2b9c10dc9f0a7c44c2b55b93d -size 566631 +oid sha256:41c3a326ec18812265c0d9e422f9681b56fcee26a3113b18fea15bfdc173d81c +size 388099 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png index 9a46310831b3d68b383760584c8f7eddd14ea6b5..d27c27927ea0b11026273aec984a963b9d4a79c9 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa287e51f384c918cf4ac07f7235dc48aa566585336e752fcf780ccee46ee595 -size 452689 +oid sha256:b7a6ff183aeb7753eb52ca09d8025dea9b6f302506f41ceaed3aadbf865dad0d +size 528156 diff --git 
a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png index 9f482e3ef8e15f314b0e9b408ced3d053239965f..ce777747beb8ecbe7664095130e91036a38823d2 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4902816e98996fe464ed3bceca0d85a86da1ad07541999e5b10a4f5b741eb66c -size 541481 +oid sha256:6d8b73449b61355dde741f3ae108c48af89014408e8b2cd54219041390c969a3 +size 561824 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png index af7889ff751ae788d74b0b09f4c29d394368bd3c..997d43beb2ba1bd70a7e3cd0fc26780076b1ba58 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd6d340df7ab050c3055ca364a67da1603ee146492ea6353bee4e50f2633be5f -size 1589539 +oid sha256:c5882a955c16e36d6733c19b10a199d74b1a4b8a5ba96427b9e2679e8992ce8d +size 1562712 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png index 74c1374a6f08ad1531b280f30dfb0ea997ed9891..fd7adb90d307bcb3b210061347d7be6b94f55dcb 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cca22f6561b017b5b7617ea340f7119f7ebad6a431f842d46e82e95544201d2 -size 427498 +oid sha256:1158ae65534f68868780ff31808bb64f2c2bd39372d72c2982ba1a9d3d58fa59 +size 558982 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png index 73141c348eabfc0f08ca09e9424ef4a926b9b272..074f71afd8e708e786a05f4e54dba8110c21f728 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8419e1707844532e61013a8cd0284754d5598e9b0eb1e0a70a538293cfd460b1 -size 382744 +oid sha256:f8828c2779f808bf3170aa2f14090c5c868c8308f10ea80e5d1ee15fcd93d835 +size 535803 diff --git a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png index 90ed6893aeeb1c387a65d1f66a1365aded1f233d..ff99240a3ba7710a93bd3876d0cc0ccc08a6fff1 100644 --- a/images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png +++ b/images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7d2739c8885f15c6b9d28302c05cbd31091cb491c55262c7e99be17fdc482df -size 546417 +oid sha256:d121a5f715fa2835cdb856f2ebf65ed13f94e0beb031d6b0a6790d5744a28952 +size 498372 diff --git 
a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png index b46ffa633b5710ed2e344b6a849a73dc07230055..99e41a921415ef1de39cb08b3e07d64496a57bcb 100644 --- a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png +++ b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39bd9db37cadf76332f0b897df6fab2b475b9b80505a24a552d5fdb11a7840af -size 3229681 +oid sha256:2d9e39661f3fb67f72afbfe7f87d395f2c50288c6eed7e19bf528629f27f3ecb +size 738092 diff --git a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png index 4b93bf1df0bcfc2a4b4677038ddeeea026799ab4..b784e1af28209ef192b92bba8d2fd39579178a0f 100644 --- a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png +++ b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5153a17cee2baef44b6d6b681d42036fbb740a3670cb43163b9e2d53cf21bb34 -size 1758961 +oid sha256:07c096985011fc359aff801761768fac03a1a9e9844fa184bbd581cbe69106ed +size 1033070 diff --git a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png index 5ea4ce3f6f31d23b6bb796904b68d4ff517ad8a7..582409e06b4fc9f40237fd1d5a2ea02044fbbf2e 100644 --- a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png +++ b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c2f23ef288c8fc5ef42518b96142c04bed609b278bf3745c2321e04d25d1e5b -size 1250475 +oid sha256:7ee79c00c7f492efad37d8e08c50239e05ab8e16cd929660dec8ec7fae86d645 +size 1146513 diff --git a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png index 0726281e2e7a2059620cc81cfceea728c7c008e0..c5dffb16f7552136619256d2baae8910570bea7a 100644 --- a/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png +++ b/images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3943d180ebfbafe1570409e47bd446de0549c752c949d0d06d82e6ea5a1c30a6 -size 1069236 +oid sha256:bd2f4b71bccca6c5a5342b1355913d9d12d2bef038fcf12dffa18f1eea5e6cfc +size 917104 diff --git a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png index 0375bebc94dba36dca8c1870f9d108cbe0059206..4890268ef4755c4c56323bf44b53fcf1f6426740 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24f17393a6106acb4d5ef1c3c09686c09ea3407fbf9b453dd9caf22645570d14 -size 1789468 +oid sha256:2b8408e741e14edf1157687f0d5dd7c045ead26f564810b454e7a149aa126e4a +size 1069181 diff --git 
a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png index e34ed055411b6418eddd126eb9f3a7aed9adc0ea..db71868fb0a67156913b11ebfedaa98501017984 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acf4ad5df81b7601e6765c69caa32adf0f4f091e5d680062e42579dc48dad057 -size 821619 +oid sha256:4f19709cd4a0afd6580ae33386d7ef25c2d0de5024c084f1b060571ffa342e3b +size 861667 diff --git a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png index 8d60a719fb6611d829b9f00570fd002d2499435f..f39d680e8282443ebd24db23cdc8643b43fbd1f9 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e23d723ec7bb1f44ac5b12a34d72e03e4a9ccf86159656fbf56887fe71288319 -size 376816 +oid sha256:cafd70d5f2c56bc2c9a70dfee6b019dcdaa03ee0946f23e7e07852278b09e9d4 +size 371967 diff --git a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png index bc58d4a7772f975c2fa03931c61be2bad420582c..14833c4d15ed7c5e64120c6f89104a5db2becdb6 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef309e1a95056d00ae1cbea373d25906d95aa116d7e8dc72856111bc9c20c5dd -size 614789 +oid sha256:a6af221a86ff62d6400f0b046282e11154fcb0be2bb28a7c4c998a7e1ff2cf14 +size 585604 diff --git a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png index 70674e0c75237cc42aa5893cb0c30ee71006d7fe..9700f398e42d1b02e2a7345fbf4c0d4c02895d2a 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b20ade3f2abf9ee5bc939b887c68ce5200abfd86e0827affa1976f6f9feddd23 -size 380651 +oid sha256:0316bf62723cf6d64e906ec464224b26c684a397539898bcb8180dced7abe406 +size 370853 diff --git a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png index ebea40a116f3a08dcd50f20f5f1b03783bf45639..40d0fe9f52517a2bb2f5d153ecc69772123ac404 100644 --- a/images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png +++ b/images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38c226715fbd5c1a30fa4b38f8b5f44fdd8b9ae9e81ab31ccc1a016eac86cff3 -size 380358 +oid sha256:555a6a7d1552df375bd0a4e5c6f8012fc7bc82fde01df965ff7e8184b578f993 +size 371719 diff --git 
a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png index de79fa0c82402be9a64f0b2289c9c0cca274988d..216e62ec22a09222cbf8cd93d7d84b7b37c5ac4e 100644 --- a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png +++ b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7ca7deb66f14a28030787a3ae42ee26ad4929655993098584fdd8f1801bd1a3 -size 129544 +oid sha256:f2920cb495e554793f137c64fc2e29d96eba6edae2d91f47fbb1e4c3eb1d2822 +size 131916 diff --git a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png index 318008eceb58591075c52afc940f528687e17f7a..c668730c55a333303f2cbaba0f5417ce62e5c3e0 100644 --- a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png +++ b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4292b00a2f987a2d73b7603dd5e448afcc0bb2388865c8815f589d830ae435f9 -size 379877 +oid sha256:5daa01898c131594a57bcfd609a1903faff86be9aabb34cda84d847ec0bf61f6 +size 380123 diff --git a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png index a42964accf33c9e883b03ef201f7f55e2b5f50c2..5031ff32d96c4100b5e01aa157946ddf49a4900d 100644 --- a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png +++ b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e14ef01c9bc852aa18825ed82274e31930721777c42e70f022c417ff8e3f7468 -size 72896 +oid sha256:ad9cab49a2381741f36e61bb3691b75a05f4992f3f2ded8fc8d62ed6eb0adc28 +size 131404 diff --git a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png index 2bfae9d55eb08be7798336167bc6c901cfd89f35..9f0a3bc90fbffe83b1fa89bfbe376dc131ec06b3 100644 --- a/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png +++ b/images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:beaa2933c31beeae7ab0372598d481b86c8b94d1f55b63bd750aeed40ebcfd9d -size 402585 +oid sha256:cd931326c2c1567075920dcc59b386e2e6a845d8d66e8050e5c249a9c4303af4 +size 403388 diff --git a/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png b/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png index 8e1f22f6d0f745740fc05c9374fe3aaba13465d0..81734d18bdd73bdc640317a71b3a8c0b34736f9f 100644 --- a/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png +++ b/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a25259aa934197cea15a194fdff3b1481e0dd4399f151110a3a1afae72a71075 -size 1285732 +oid sha256:0339aba3d6e98cbfdf38e5487c3f4dd51e07e6be6bb5017ab1ae7922f9ced7d7 +size 1027221 diff --git 
a/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png b/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png index f21cc6f65170d5496cb4718a07da07a3492c3a00..9b30bac4e4605e8c497d17da5b03280656cd0757 100644 --- a/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png +++ b/images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e974580a639efb77b8fd334b5a0edefb7eb2ef652a35fc88b3fa9d95a8b2412 -size 1454693 +oid sha256:851ce7f0b245f13ea841e51022769e04d89f92116e71df495515a931fd520f35 +size 1939778 diff --git a/images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png index 350154a9af6c2242985a316b14e5c7f35e5aecb9..afbd2b7759aaa80d407f3b33238e08171bb0537b 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cd8a17a7b9b10c404cec7bcf66239f95b80307df87c1ceef2c76297788d162b -size 230566 +oid sha256:cb98318913484ff017aacb70f0a37000a08c1b9e138623123fd4888b556981a5 +size 233140 diff --git a/images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png index d8aa93277c06f66c6b6562f445baa5078b32a827..7dc076e0418a57fc17b441706cf53a9d99d28c9e 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cf1f6f81c8bd319562f82241ede5d40fe40e993e1d1adacca613947d7f7fe04 -size 84118 +oid sha256:473a49a2b01d050509715228b86afc2f8e552a4902bbf43398e3af06036ad73d +size 85657 diff --git a/images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png index e3fbc84c217741a3d6acbd70693b7c1969467f0d..6eee07c7dbff8336f544f598410ae00a5d458f28 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65007d9152cde047e1eaddc64b22401e91b2f64bffa28b47ce3e46f8f4b13dc7 -size 2168221 +oid sha256:e675a2ffe34c0cfac0fb2860e070df263917865b357d5bcc813238016d68f439 +size 1783133 diff --git a/images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png index cff049fab23f464a2b09935984202134a43a7ab7..7c46d63e64e6592edf8cc461e15871c50b219f61 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d721ba0512ee5f388f9193f396d7aa64c823e33f3316bfc49bc29b9216fc8530 -size 731869 +oid sha256:e016b9cae2db824acde480c42edd692d83f0c688724eb40525aa630bb7e602e9 +size 516631 diff --git 
a/images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png index ad3e47e7077fbac15484ba424aa25bb4725bc0bc..b7294bd1c84a4a7d69466a1aa4085084efe2a242 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d235a90a21691ba446f7cb7c4a99b524264193f051cf379057e9a10a9e88e675 -size 693521 +oid sha256:b4544e380810c6ec841e09b7c1874cf72fe25d7e6ddeaf80dd66d219644c985b +size 692268 diff --git a/images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png b/images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png index dfcf6c7bc53925ac1b1894a89790773fa5290a30..610f2e1ce852897ad208475da59a2f9ded99faa9 100644 --- a/images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png +++ b/images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74f435110535f00b6a12f1fc50cdeb4f4d4b2a2b3ec228131839eb1b7c65e095 -size 114452 +oid sha256:6b4e78ea932e9ef4a1236de8b41550929d2be08f6034bfc6ca9159fd57d7545f +size 117320 diff --git a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png index b2d1e36f9c8e118123c0fb647d37509975b6a89c..fb17c73eab3f1d1158f6172e8833778bd503ddd6 100644 --- a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png +++ b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c78bbf526ddbba763d01345cd2b5fc9cdfb85c1c48874e7eb01e6813bafd1c4f -size 388896 +oid sha256:30f75cd76d6e01209f3fac54f975e4e662d0f334c87445e39cedea932616e338 +size 314888 diff --git a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png index 2cbb2712d38d25c6cd208a92bbd7660723026026..181d11f1e85b89cd40652c4ad765004c59f85a06 100644 --- a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png +++ b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:282ac2c2b4e20d2a69ae9fdef8c119a1706e5a38e904eb3d900f06b9c73d17f3 -size 1739208 +oid sha256:d7a1f26da065b2adee8235f2fb30dc28256f4c288f877d8645d8425938a9580f +size 1765082 diff --git a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png index 1003eb7c1ca2546b4cbdc40c5700b2b8058c6893..6955660a8deb72490fbd1a6da2b6f70fc1dd8578 100644 --- a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png +++ b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeee5cc51d4b2ea1251b1290861f1d81eeefc6bd99a67fd3296795e6719dee6d -size 38659 +oid sha256:887d664e917dde807ac29ed335e17cca17c4b347c9a5636b2afb131145cfaacf +size 43949 diff --git 
a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png index dd13bbda744068f31e947cdb219ea94d9c11a1b5..f8e68560c5d5cc9e6e97e5bc107e0682058de55f 100644 --- a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png +++ b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2712f29559e51828f1ca4798f37116527d17c9cbb8da8930fe43e7885d878cc -size 3268672 +oid sha256:35a60b044010eaf8fb444b5ea387b844609780b21d248acd9551dddfc41700f6 +size 632989 diff --git a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png index 676ed1af039380094bd850ee2aa64acf8958a3b8..1342f0dec3ca88ae86fc1aa822db6dc65e7194d4 100644 --- a/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png +++ b/images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7233db1ad8ff9fdcba035baed4fdee0e49cd459921306e94014824063e5b6b36 -size 389068 +oid sha256:f2b74b3b5edb2095e1064be8dc499f946825195a985d622737565dab2e2335c0 +size 316353 diff --git a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png index 3d2e028ab26031b0ce709f43ebdaf490bdad494b..083deec48a50a1401aaf478fffd49ef33c3b0751 100644 --- a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png +++ b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e74a8c15c214bfd8c0ed4bf4888652b7d08d943169cfd0e09b995f50def53fe8 -size 745534 +oid sha256:2100315f7235da579d382884864437d22616e8c1cf4da535f0bc340abce80cc2 +size 439119 diff --git a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png index 5eaa8c770f2b7393de6d826bbd5c4f017d136a64..8b8b59df0bb03f6cf7083b0e4b86a97121d88abc 100644 --- a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png +++ b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5dc594f2b1e8b5a5e4093d6f636c3e9bd0970547a42d51222002a90959f64099 -size 4285320 +oid sha256:a45fbba51da97afb2de3fc879be69d89a0c186a90483f9a0841dcbae25d6c7c2 +size 4335305 diff --git a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png index eb286b3dcae13977cd0de9bb8e4b8467c01fd4e9..35f52e71e55888eee3586aeeda35574a3df76eaa 100644 --- a/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png +++ b/images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de6b98f19d9581b37cc36cd3019263da79e73e121677f17ce74953b0a70ef0ff -size 4824354 +oid sha256:1641e3990807077a32c6611e9dc775af47d30399519267219014850b25ee38da +size 3081406 diff --git 
a/images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png index 5b436c7e0f07851b230c04cd4834113984212ce9..6f06fbd08f556c69b19dc73d3c195c27c93c350c 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5182659a57e3fdb3c8617ba1815062cc61a0a1eea838862c95902639419840ec -size 1164868 +oid sha256:0459a7c52d6987d16afd67d33f005a03273a09ac8e70143e8efdaf56fa9109e5 +size 972683 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png index a60e76768164d523ac3a9a9cdfe396e5fba5667a..100ebdbf21474dfd76fbc7321a6e1e4138b31ba6 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d212d148c9f4948360ce133ad2b403a585b2984af2806e81391132671ca0ced -size 1620151 +oid sha256:c1d4505c23d6e06e2cfcf049e2ddea65aef8d015f40a31843c8070a8a6c9b5ca +size 1180603 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png index 66b094490f4dca666be7b0d6dcbf3c59a0348936..bb677c95e9e055acb1c4bfeaee4bf29a61993642 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b952c93548492cb645c172379fe89c8f71314b8d36b890382a11eab34823e3a3 -size 829488 +oid sha256:fb76932ba0662237bfcedf8002655b1066def32365ec3c47a9c7134ee02da7db +size 1119490 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png index d3229d9716e0fc5b961d81bb4dc410a82c3d8a9a..c675e8650ce7b462dd7155107b8ddb2b6d2d5522 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65c2a25fef9e20ce40f1be9aabd7a2ba1c1400bd680e195c104d553e21208ba8 -size 1432405 +oid sha256:a9cbc4ccb3392c6587f04a8004495da06fb8fadc961e8fea4eb27d3be286924c +size 899515 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png index 065a558906e97f3bcf9b93bad43007d77458a5fb..557397efced6c9a107c1c22003dbfaaeb0fd2a89 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa57b2d619b10660a61a34a9717e4729a0285487ea188265e73a61b5442e5087 -size 1157454 +oid sha256:b761b5245a993d791b0c77162679e524f26ac669efe5d5034161e1ab92cff96d +size 1215099 diff --git 
a/images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png index 81369d30bd6aa3e27db024f55c6f4fcb88b664e0..814d0132afb9748d96517feeb46404088d017ebe 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6a6475c7fec15151f29312aad43274d1dacce23be055ce2ef264518a8dbaec9 -size 851932 +oid sha256:b26bd245d834bdf41f9bc74b3678ddf26d720c8ca421ebb53d81b55878aab452 +size 1164316 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png index 560041bfa887a021775ec26b7605cd03c2581751..f9125f7c40b5fe68b241c7147e4debeaef9f41f5 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e3758974c756e8189cc73ce08fa8107b883cd6cc3d24964c67501e32c51fe42 -size 820199 +oid sha256:dafd281d97fea72e41d576d2adf24e532e21fe9910b860178a7f1137b67faa21 +size 1132440 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png index be1e06468a7967c84d96382650d0b5d4b1cbb714..2f770514530d7dbdcf610229d528f185a693ca19 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:870acf19c9e4e1495a0fb7de35df4cde5eb6a2b2f10744f78ea17f74212740d4 -size 1052249 +oid sha256:159a971680aac3154ae20e6a163f9760e474cb2ec47c1c45b393ffc1602cbb5a +size 684912 diff --git a/images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png b/images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png index 62d54be3c29935c0a0cc37dad36046672b15e8e5..d7bbaf7f141407685ef92210102261ffc8849c55 100644 --- a/images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png +++ b/images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:687c495addeab4b215494358d83b23bd3dd71d34cf0bffe8ecb4efafa56796f5 -size 1053692 +oid sha256:703f7c70d55fbe9dc1a2344f5766ebafdf45619ba0201ed1af1e435023b63833 +size 1164273 diff --git a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png index 0acfd1a69f5ef0e20f31ef32055525f177e700a1..048db896f5b2d8e71dcefdbfb0eebf1197b41ddb 100644 --- a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png +++ b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8aecac4fb87a430c9eefeb938af6f334be77ec08d807e812cdbe36fa8b5bf74 -size 854388 +oid sha256:cdb9a4a87240440b6f9d35705af783b0eda109193c0e4c336ad4324d22d515d5 +size 754436 diff --git 
a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png index 1d466e051e1473b351753ec9007a16be73088cbc..09d5d855241bd6ce4b332ac57cf09648c68a01bc 100644 --- a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png +++ b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa2a962ddb538cf123573a9e15a8a23de927ab706a76143368d22f8de07042c8 -size 201303 +oid sha256:94f192bda36cf7a2117bb10d711c025cfb9ed39b593747985dab2c97cf021a57 +size 1352400 diff --git a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png index b19c7903cf937583f20efa93c09da54ef06d8f76..ec0be1e91ddc706001acdd43f48a1c4f4e651084 100644 --- a/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png +++ b/images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63c9d9e68b31b28dc522e65252e0ae3e96c529e0d1fbba4a94bffe64effc03df -size 2150755 +oid sha256:87983dd5f5fed3e836004495249b0fb697f0d7f01479d5e568f0d1a7a2aa470b +size 1281255 diff --git a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png index 2b657640fe14da307bf59c687752fd230807efb9..264a640d7d18354f36399589becf273e17b8ae35 100644 --- a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png +++ b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:069d3fbd7d635f92a3dbd34b64752c131a80065af48c455d1002d593b8a17706 -size 2158955 +oid sha256:72c94afc9f56abfb4e6c4c92af3ce4c458bf498b362377371d13fdebee32ed0b +size 2214220 diff --git a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png index acf482b6dc2245499369b2142b7d1aaa03cd545f..fb8e3ac945089b3b7b19c67dc7595668eeb59ac6 100644 --- a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png +++ b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a5264854422c293a5cdfd6c397821a04a6baf797f55c150e912b338f59965cd -size 1929389 +oid sha256:3f3be36f9026c11a56e9cbc9470c2a8c34fe85fb299dab6c6ef7872afbc233ce +size 2225802 diff --git a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png index aeb820e8262bdbc9bae37c59723d156dee43f1cf..3b74be17d5fd5073e8983a4b07ae82bfd63459cf 100644 --- a/images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png +++ b/images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e30eff3940edb0d99b5762c590adbaa3935b343b3db2d96d2088ecec3aae2bf5 -size 1617629 +oid sha256:1393108fa3ce47f6cdc12a79853a3fd48bacdc61b80614355cecd5ad761563d8 +size 1809252 diff --git 
a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png index b8497dc18806d3ad353318a370048b491085f772..e4fd2148a249ca4ef2c2cefdb74cc020c2d88abb 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa2890a62d5263b36d51f75b28e76993bb3a30c6abbf2e6152468d96124e29d1 -size 576137 +oid sha256:ee5b2b81d536d5464600ab011de5a14c6c498f9ab8c403d2225520ffbee40f21 +size 792379 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png index 98224afc795bc1cf4626800dac4d056287548a16..9035e7166dd240deae4976be3cbf79d68956ba7b 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c144fc4cfc4e8ac72bd6c52ada9ee1ec1694a3fa9301077d71e8d783388a878 -size 1113162 +oid sha256:a93f437e6396f470285c851ab4263eb9134398b2ad3c76ffabee221bf26a1ee7 +size 1302093 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png index 2a96af438f72972242f2eaaa842a3ec1c3d9fb55..1f9a5a6a84ea63f0ff15f09b35ea9665c78b27a2 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2302c5757a370a8d680f88caa5f52c0488b95e834cfc7c70e78da16a4cb1f74 -size 1384824 +oid sha256:577be6be390fc7da306e3596cd29ba5beaeee3b5a06dcf92b405a3aa343de23e +size 1937548 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png index f725b4f1ea7ffbb379a5928a0c1a723b3c996d45..4cfabffe31f66980cf6c85952655d12de8d0b11b 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21b6db1f66798e22b922c43b3ab0403f108dc4650adddaedd00ea4224f328acf -size 654641 +oid sha256:753d5343ea5c7991e41390ef0355121ba67ae6b561a6a07fca74ad9d883f6214 +size 608452 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png index 0dff3af0d3becef11d737654050a380b3dc40480..922664d572c03b6b6aec63d7e7be691da0a2e82f 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edd9cb290403d56badc96896a724523766fdb96342f60a49b907e6f9534a2f0d -size 1511368 +oid sha256:c8251ad806241255b7f8410624c74e7996ca6e866cc038b9061098717cc452b2 +size 1937927 diff --git 
a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png index d610a77fb41eef1874aa397128377bc70dbe4ce0..4e26bdcdfd304f98491edf57e6b089455cbdb459 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89f247d2af8420496646017124765ae83a2d50b2763c1630dc67d61e3a1974c0 -size 1157770 +oid sha256:4005ee53683b40150ba10ee292d1d366adf92467029df08f6aa9722b61767356 +size 1722804 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png index bce8b0aabb87eada580b073ef7a06cb650f525cd..045fdc37288d6817675343b203cc6d384cb0fda1 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81156fcca7c48ebf00dd7f0007849482db6d4c92359514797afc24c85978b8f2 -size 1733404 +oid sha256:70406b131c7a851a5f8b4e2f884c6dcc8351ea8e953e81516447d7617fce6d69 +size 2177556 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png index 365be55db991acc59a7220ad3e1b8e7cd308e947..8a1df5419636c67db9b6e818a81b182a7aed0ab8 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8a2a3815c8fd75134854dfe8fa480cd4032e24cf78db7e24e54f922be6d47b8 -size 1515584 +oid sha256:d97c68c917d834c2c5b70fb4c38887f9e363dda0ce12aee1a06a35eff6f3e3e4 +size 1128012 diff --git a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png index ee13dd0933826b5ebb3326f0bc185beb4fef6b49..b9644f9d525a65dc624cab10430083a9e2ec5e19 100644 --- a/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png +++ b/images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d85fa5183e2999bcb38ea74b481fe775f2e9aa95fec932b2895fd3b3f7dffff -size 1508759 +oid sha256:023703bb5f6fef6221a51fd3fb71d6866bd848819ee577c20048378ce643d3b3 +size 1689215 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png index da7088442d809d5b8be00406dcbaf77f573b5725..a0defb37b3686f3e7e1917f0dbde2757ea699b80 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b2baad724417982f4d94474b8bfc4fa62765962f8e86c6ba424c5fed1b99bc6 -size 608524 +oid sha256:30c6be03aac64a7519159c607853eb4f1811478b40871690569a31e94bcbdc7a +size 1226020 diff --git 
a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png index 3280dceac67af00b2f9c05dfbdc15d91a7eb15dc..77006c54e32727c5034e18bb7ce2477a5e1d202f 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3345ce7e9a871863287a0d2ebb65196b07f09d4dd82a31bec43dcab252a77147 -size 1086020 +oid sha256:7f912fb6d49d263f1067b98eef7e5df934d294c635ac1946c03dc17a0180a5da +size 1124416 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png index a07d54db2d77e15d5cbbd1331f284cadf004c3d3..6466e446615464df7a6208d339c4699413b6df8c 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20f0557fbe88b797b806a1dd85e8420e807d694a5cfb1648db1ce45f1c3e8e98 -size 654547 +oid sha256:fa9917a32d8953b29351799319d4e10a00c0a0a11af61b491784ba15d6ae546b +size 1067658 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png index 0e1209e4a7faa162ee38b66fc3b09e7e2417b03b..6c917bbad5c51fd6fcd61b6d2e8da616850c99dc 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c739e4683f7ea18a652033462779509c0e8d9c877eaae1fd0e5afd8d6d3cbe04 -size 983492 +oid sha256:433cabc54400e2f794d27eddcb7f7afa8d117a437d7f4b61c88c77631b1e6ee3 +size 885532 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png index b5c6b2d89a76b29bf587ccb6bb731ab4b7e6720c..4f1566dd604b7949490f8b311ee2b5f7e37eeaf4 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:956a6430578d124545e9861b40cf154fefd92ccb0379d3e283be4fd85c306898 -size 980496 +oid sha256:1a5267eb7ffc24d0120abce6ff2dfce3fcf124a5e861af46b660198f7a363e51 +size 519766 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png index 0312b65eb0d45a03f091dd26b18e989650770676..2de2cebb57fc9a279f4dc6d805c55f20959d6911 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84c23e15b837cae8e81b563d969ad2e2cb02b38d384e1f312dace53e7b923e30 -size 985141 +oid sha256:c8ecb5c1f108fe6ddf022d34d9c965267ec76cd73fb6eadd54373c00ceefeb23 +size 725069 diff --git 
a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png index b47455b27d25078ff994c7f3a1369a6bedace47a..51ff1a91fc6cd487f8c5e730d6cf082f974398df 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f35e02d5fda599a73b465fbd656ade3ab8e2dc4ec322a521232ecd4e456e6eba -size 602540 +oid sha256:4c06341f56c3a13056b13927ae7c5c97d3857e4b1f0f6a278b9dae71f92f65f7 +size 1027674 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png index 8743b7440dcd68479ac45151a22a534d7d76e169..e283d98939c542fa9efe71c24f59f88287e59ea2 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe765ea80c0eff312f080d2c20211d7e6bc5e981ad998c9dbbcbf3cb119d7294 -size 1079759 +oid sha256:439d9c37e2fc7c60d72e6d531136a963eccd8aab3bd5802e44ccd4200485ee67 +size 1147202 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png index e7255e06a1f02ba965db9b75b2f411ad42138d53..6aa872e8577b9177e8ef5120fae6d643f672a509 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d6ae529df159122ee6c5775fcbaaffacfad48cee2731b8b863ec6d36b9a9580 -size 1285693 +oid sha256:27a2c363e6db32566b99370322a8560ca23d99c865baa1cf41492aff0961d553 +size 651778 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png index 8e1b5413379a2bc71594317e4fa79a92be95a47b..fcbbb783d8c16af6877227445067d5292257de34 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b50ef3ab23ba661a73fcac2eacab8df9946d71520adee7746a64403669c939e9 -size 2285060 +oid sha256:288e4d61bdbd789ebc21a0cb60d610c6ae55873bbda527d7d87b53c457f30bc0 +size 2092931 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png index bf8a49b0f7cb90428f6ac133a43b0a84a9ed4f40..9c8f0cac028cf3ef1d7ff65833ad514f1c2a1204 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:efb8f7b5da0db5f176d39f850795e459c4cbdd591bb2dcfe631be6f604b3bb6e -size 656307 +oid sha256:bd059e17a7e0bf4178b1129ee762d8d1350d04cc681be9ab8544a897f4c41374 +size 1217664 diff --git 
a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png index 4c1f52d2f725d03d586e797fa5f0d227f2556f9b..9f1c43933080789f95d717b84450b4f845e111c4 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12c643c53ae77319938084947ea2a348dba82beea9aed7c47d88da357fd6ae42 -size 1145086 +oid sha256:ee68b9a86bf112221e24ca1487c79f06355252b83dce8974394213a344cf5e1e +size 740501 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png index 3ac7fa2138abad57112dbc08294fe40d801bb2dc..4a1386a126e312f8399aa1f9233ffa97ea66c0b5 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51ec1bd4c6247a14fac5eb221d92ae4f7ba577bf272c4634e14c7a837cc31075 -size 605047 +oid sha256:cbe3fae2ced1c7b0756fdb589de51a94f445efeaa54b39813d357f2de6df0513 +size 1171235 diff --git a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png index 9f91c916b7743251a8ac9800665ad8f337c015d5..9ac096aebc039919a7a7776fc6b865acbf010f88 100644 --- a/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png +++ b/images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72043dbff056dfa7461ba187d0840dcc32932b5655ada19d294dcf5d380fdd0c -size 1086590 +oid sha256:2ab400dc0bab46bd6cb93244f7ab8035ddc7697ae356e7a406dc027ac92b1423 +size 1120966 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png index 967d38e4a7fede0420170fc5cc39955565b958b1..7d395b230a9735fbac3cef835dfc363ffe211532 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4b3bfb9acad18b1cd21e3e1fa9c6dcdfcea4734483d232e725dc0854df2a9e4 -size 958358 +oid sha256:75d7bca6cad7190b49a875dcbc84e4d73d20654d59080f00787927f5877b2651 +size 559808 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png index 64982454f0357c30a6e68787fe674fbf5075fa37..10b818fda3517c520f30daddc1a82aba1d0d9afb 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:942eafe281f9af44027c9716aa6d7a8971cf221cbe50e14e04bf1de115ad249b -size 663591 +oid sha256:7301a011a23102056d91f4d1137d5a19691c3b7ed18ebb02e38707f5b89d23f6 +size 835154 diff --git 
a/images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png index ff59fda15844ddb4a7a0b0c36ede52611b205f4b..9667f874c07784bf202e326d4227c06d1a60ce64 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be791d4a56727fc01e04a387fa575764b40be8f3ed408517cba495df2dac2d2e -size 781694 +oid sha256:fa03bfd5f136bad9fa6b20c579d8f56d147a3a73f071312f3544dd9f43e78f91 +size 799486 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png index da3f78a21f8b95f6015456d81a5117ae8e4b4177..ee01baeb5cc2db271ac09f7a2689c01f6d1da78e 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3066c97b7454c31a665e6ee85ca284ca02ed3e2ff0daea95194b1f21f2e20e71 -size 746917 +oid sha256:037c8e15f7e5156dbccaf97aa2c5bb5036c07028782aa7c4303fef75c249856a +size 1030646 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png index 9dc517ab0f664df2db07158901aa4c976e647495..5f8001a4b673eac9f3d22017395cfd4cf4207757 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:852e1123390902313fa6c43155a649bd3ad41966005c92cc8ebf2198803ca8f8 -size 661731 +oid sha256:0b943280e6cbed7fcdcc407c07001bba6e8c88d314de8b5909b438340e0cd13c +size 1385289 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png index ab5a24009bafbbf2a80b9c2d1f717e3cd30b15fb..b38774aa529308959a71a7bc6301f67d22ee6cce 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ac2003c317f9b4bb6b63872d313a89f97817d894042fb09d2272bc063603698 -size 780105 +oid sha256:fb9d0681bfd3a5ee39d24dd3c97e3af24d5c508ac2dfa8e86443972df8627b15 +size 1241920 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png index a1093b8a21cce6cd1ec9b7891ee35cf752eb88cb..87deda4ecfbcfd969ca3c1dd3c8f0bcaf913e8b7 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb09ebaaa8e54b52b0b847fb64ebb980e3a31d39e4a541cf7a2faae32f51fb23 -size 1029001 +oid sha256:71aa0c4ae08707da38f88badd9cc7448f1497e9a0c789096b664c4475e456cff +size 1026230 diff --git 
a/images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png index a8fc5b6bafb80beef382d69f8d1661c4b717fa02..8426709762a1ff3916d7f7cb56fdb3fecef89b9b 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a2804f2be026f952ea9cb83c45260b75cd7751bccf64e8c53a0a3dc5569f0e2 -size 874468 +oid sha256:206c95523f09719cf1ef918f4e3ef1f8f81d09bdc10903ff80006c114359ba07 +size 663281 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png index 18f4745bce1c941307cb3153ead97dd64b335463..9b53eef75cebdd3656bf6adebc76556b3ae7c053 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fd8e5a7aa64caa07e385e1cedff4759b26b8632f5be3b2c82f3e8c11ac80298 -size 781479 +oid sha256:f0aedd9029a34a90cfe412d5d7b58fd8b2a36cae9a389ef6ba1113128bc3c756 +size 1101030 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png index f16a5574129928193f165c20374aa411db686bed..ce6cf455b411858efab11a6c69387265f9423150 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfbbfa03875589ef9f628a7f096655d722f373791f7089bbe5c7877afc3a1909 -size 804684 +oid sha256:0418fffdcf3f4447c5a03212ff52722b008fddb1a9bf91d6fa25c997a92e27fd +size 962967 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png index 7a4bb9db9131eacab59f584c5d59a468285faa53..1cfed685d593df55f143b9a8b712e4802c5850ed 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40d15f02da6ce63e5b0a9122e9468ea07f9e4d73898d22f87f7c5cd5c629ad7d -size 1004925 +oid sha256:385024d91e79e235d0cc125ab2621d8fb1c18c3c76bc05e0a5c1a4c0fe5e83df +size 1556810 diff --git a/images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png index 2f4e1005e86eecb72754a9b84f4b8c6e34cc061f..d6473238a188719812da49fbe340baa2137fa29d 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cd6fa7d20056cf63d3f177131742e777350242a796235dce0fda02fa434930c -size 1020617 +oid sha256:677ee1e902187b42d291dffebdd581d05bafc51077eaabc95f1b82c06890618b +size 814496 diff --git 
a/images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png b/images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png index 92989b2a0d027ed118142097053241d09b7c10f2..26773d2abc322d8345ef766eadd86728e4aeab9d 100644 --- a/images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png +++ b/images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0862ed890d7c56a363b13254e8f0779d51577bd33c3087c6ca2f9d36f6b7bd75 -size 852316 +oid sha256:8e7460946e58bdfc139cfee2c68a244c8adb9059b20d857840129cf8968aa751 +size 999861 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png index 2256b6c01cfba7429bfd461cc1e496e3b70a514f..ffc57b22f30f0fc240b94504d495cf0b26cb5ca0 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bfe0641541bcf0fd87ee0d9bf634ecea534dfbebf642d52750250280d1bdecf -size 642702 +oid sha256:88856c321cb5d4bc7f784f3f64f1d7a7314d4367af6385cae28e885562de42fb +size 719205 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png index 02d7c845bf4d20af962bcd9f3e87cd237ca2fec8..f81b3d625028c1bc97bc93c2f6220bac2deabbb6 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdd7ff946de33e8f3f315f610048ec172fda29d88a267b6bc6ede8b34a88d161 -size 712499 +oid sha256:7dba2410e907435081d3e0fa7dfc15a70b30f9dcc22a03489564d4c252245743 +size 678843 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png index 97385b86a861d09f3fa27acd0a694584e6217c68..cebcd4b22181f096e28d74f34c2369176745dc67 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70e03f3e18748ec335ed7cb4d696b4797f2f98d7f6b8a43556111585415db702 -size 685294 +oid sha256:adc2c1c91b519116448cf1114c7e8884a58e8e5234b116406f31a8ae05cd223d +size 761925 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png index 6dfa03865237a03ff61666c900034bae1315d5fb..5e0a24f8a9965d5535f0f457f7ebfc8e050e19ee 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14f8626e8250e982026cd62e4f98a0d9fa7a37b533a19e6f7ff98f38d90ef2ea -size 866235 +oid sha256:d13237a18176cfe0725f7d4f6f1e5cbb90abb7e32cc1c9906c1078d4889b435c +size 1268964 diff --git 
a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png index b03920a1771dd2aab7252ddf660143f8a4fe7083..8f2e77aeacbab26bb399ee65f5bd72bde77a81f0 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0441a3b2494f82b0b80badc2d851bcb991e8f68d8ca0a546c746abb35a65a19 -size 893351 +oid sha256:0512409179593937d0ec0b47aee17147c1ac5e3e2d7f5b2ccb147be62e15a6f9 +size 1415829 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png index 1245a8cfe0d15b22ecb8ee10c1d5bbe6173282f5..44635fecb967e24c9eb9253079445841c9b1706b 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edf4b0b0b4b5226442f35122a0895ce95bd01da424010c26c4f3a87c7a87d906 -size 864769 +oid sha256:97177c5ff4d4f029524713da6e4b0180931710bb7b945e52bdcabf25c685a70c +size 1366791 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png index 88b0c00ba0eef305cd870436057f8676696c6f49..c501b534dc42224dbfa68d504c8a396adfbaf6fb 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02653f31699ef3a678a0ada78044f1192c930c0bb98197c51a4e2a7ec9b97229 -size 662374 +oid sha256:5f622f513b2e00b98c4c3d32adf85354fc448487586a4fcb2e695f7386400a08 +size 948383 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png index 2dbffd84726c78c9f90966fb498a8904d72b04f7..eb8cdd63523e99754c67e93b63b220a11a3b3c48 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d098a61fb6806fa7403a1505b044a3723f360cbe31d621afddeb1d09b5b71c2 -size 822277 +oid sha256:92b007738c71b043481246a678c85aaa1ac30a90eb35158da0116a2876e0ed41 +size 900359 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png index 2ce9fdcc276c9638611ef1e60be9b6c343269a3c..0d5030d552142c2a9da1b6fe1b9bb9ef5334a13f 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:895b61e142570baff109d1a227b498b0c666d42674ae18c6bac47d2c91959313 -size 1589946 +oid sha256:babd81113aed5792ced6dca8a5360d139433421a4ce9d746af02fffc2029fee0 +size 1654192 diff --git 
a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png index 487dffaa6b64b2487a6115055a04668f471b6cc3..d8ef72a3132c0a7b37ece69ce20ef83075f2e2a7 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2975889529668338ef3e79274b3d85bd3198995049c9d8f3073fc86ecca8a521 -size 863149 +oid sha256:e199d9c63269c9f58898f0e5471554618561221ba9f3f76bc1bd63e550497091 +size 1187836 diff --git a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png index 2b9b9efa1bb4a329c95994193ccbd85a433a6373..c3ed46a00a60825d2756d5d49bf37f6584f51282 100644 --- a/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png +++ b/images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcc9597ff94d0bcb7efc26774343ab7ae55765dc3d72ac0fc153e952c0712066 -size 788791 +oid sha256:5f590d4e9d578deb7eba927d71f2e91a41076f2c74e86f6aec1252043ace1649 +size 779130 diff --git a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png index f4f6686d79046d506d6c8db1183d84145115c3b4..b269b7ad10de58d4ab50a8c7d611b844fe7467c9 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2204dcbec4e278b4cae9ad899ca7585cb086dfaeef87b8cc6c7ba535d4df6910 -size 4705331 +oid sha256:e38886c3d0484a500e8c7b2a0ca627bae23880944c4204a695e1609cf1536731 +size 2321972 diff --git a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png index 034582ec1e17a519809e887d372d3f6bc252c084..04df7f053b34d9e4fb12d9efbd69e28da91a121b 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ed0b75ea51c5217fe75fc09c729d0adc349468f9a713809620ecf9d414fd486 -size 3270367 +oid sha256:b22548bb3f91e7a12b27acc4faf8653b460650a63c7dcc7b0d106d169730861b +size 3674299 diff --git a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png index e7ee0c0ab20496aeb2fe7dc0f27c56d4812e7ef6..ce714a3c2e806e71bd37dc29c026149784b36b6d 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e61a39ad7c4a0ebd6dc80537a321b02eeb16bc7913d07f8a315559c977fe937f -size 3055845 +oid sha256:f6fe8798adf5638deba9dd97d79768b8f13b7b6584fe87a33b060e0582372d4e +size 2184546 diff --git 
a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png index 5b2239f861c6915acb2382d2fd359a5b1b198d12..e016682c0e0548291ad5b83c4d2a463560a03e8c 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2fa5a55b6311402233903ba45ff7fb45dacfbb8d360115d6a2bd5283cf18388b -size 3528680 +oid sha256:936ce83af0c8319c7ba6177cd5e6a5259d2c0f51be677a41c738167bc746427e +size 3395538 diff --git a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png index 1618a67f5c9a8b1932b5b42c22e07f4fdfd693cd..06412081281c7b2e5a8310079816b0f8faa7430e 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fed35ae9a250bf92af6e648bc76a5a21b1bf6d8ca462ac7958e57f9f1368c0c -size 3780588 +oid sha256:b8d1f68b41f87159cf3294aba4761415202d00540726c91ed4d152f0f2d0fdfa +size 1607383 diff --git a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png index 622cee949a4933a06bf690210b90cba9682ffa5e..368e7ff6444efcbd131932d8fd2be01a61cd66f3 100644 --- a/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png +++ b/images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9d493acc34a27db3ef34e024d2f904a03cdd3a2d19fcb9c2acda5c82098733a -size 4406536 +oid sha256:79ff725fcea2ec559b2b750730edc6c601f6df2632ea592616ae0670836358bf +size 2210083 diff --git a/images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png b/images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png index 795037eec9541887e6d552fe54e6dd714ad25683..488aa2e3629d15fd44f2f8fb62d8741fce804c36 100644 --- a/images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png +++ b/images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77aac49d14700f16a49684dc1e5b9d68503944775d246c59cc181472de7b4384 -size 345217 +oid sha256:ef42ac898218a8c4ac5a91580e161338b8aec7cb570c83668a3d5305742386b4 +size 362820 diff --git a/images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png b/images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png index c13dea736356989732c7671c65cd6a9cbf29f5cc..8cff060c968bdd41370045248ffa9b3d6e55165e 100644 --- a/images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png +++ b/images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45cfa1704232f31cc16a9b08fbb4cd23dcab9c63b601d302c3839ac4efae9add -size 475119 +oid sha256:cfd69eba0ac2218b35fcf282e05fe09d7c41de6d958b3d559aea8667b3c75a3f +size 522966 diff --git 
a/images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png b/images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png index 82856168641918db2e1b59deaa3a63f254798216..a58c1f7ba317b6f99605793915d2aa5d49019ca1 100644 --- a/images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png +++ b/images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7140e0a3f7837ac351912af3a5b525e6b4f3ba405cb7a44352e122facc97d49 -size 684484 +oid sha256:26b109944cea50e9ca5a1ae1299c518bb56eab130254a21fb5724fd649841e19 +size 1080168 diff --git a/images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png b/images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png index 2805fe75bf80936c5f30d641b23bda909cd88e41..a85c6bdb1da4fe60a9a04edc761b34ea561fa7b1 100644 --- a/images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png +++ b/images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:637b0f500fe4f63936406d684f729001332e4341e48fedb9b24ec426dd5d4396 -size 464401 +oid sha256:e34fbbaf2394e740001d5a6120032dfa7c365225ed1898447606ff80549e98df +size 543951 diff --git a/images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png b/images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png index 68fec9ebe9a832738c63712301cd92bcc3e206ae..42fcc0aa853de03d073491441ce5737754d3ec38 100644 --- a/images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png +++ b/images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5583ec6f54f0f94ceb8ec65085b772167aa9e23ead4eaeb003fbcb150cf9727b -size 93958 +oid sha256:00bfa2c654944969ff7090e6d740a481b90020c1eab3f7d1484ef92d0c69588f +size 101533 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png index 06b23c145c882b59bb8269f756bfb8be6cfe2c41..e2e276a8d8815f123af4a3be03bfad6dc858584b 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b834715f1007d7bc59861f9d7fab1fab9fa14fb408e7431c4e344cae56395af4 -size 1293861 +oid sha256:ca27c5c19675b797aa81e316a95e5f85ec898d640db04bf9dc7f97113a584650 +size 955340 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png index 8d4d0af8eb4b36b90b405f3eb1516ef368583dd9..ec0fbb08712f8257fc5227166f0e261521c9a2e8 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cf38b1fdb876b8bcb26646f9d38928b67ea96dbc6088e5c168c09efbd0fecda -size 1359028 +oid sha256:a0b9848e86a6824c590992b421e20c092ece0f1986236bafb6da1d172b40906a +size 1018483 diff --git 
a/images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png index 0f2f04824c13cbb93d9fd4aa172b56456cc544fb..4ee84aaae1d9dbb491c91dc271519eb3a708c926 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25f07c4e58e3e613b27ec371090ca48e34029327b46ffde052b0fda3d920ce12 -size 1122598 +oid sha256:f2a5bb6bdff19f6012dc6b8ee8ccf85cec7a7f6043d1b65397c199c32053ed16 +size 1589277 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png index 2d1348d7d65e3d034551bc2ec0f68e8c15abec2c..3a79b8d073a2bf500b9b6b92b6c0a344ee90e8f2 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04425a785309669b1f39cb3720a197e8e301dd60a02ad824986bc0fd6b657eb0 -size 1313824 +oid sha256:b121f60c4e9ab850f56153483b821d5322cc26157e79e1e5c55a57ef32ad7837 +size 1521202 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png index 1ede397c106ff67067c8ec92f5f95ec8e1a98e0d..ff3e5d8b6aa191a8eaed795b9304ae96c85e9598 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea4401be7e89b51fb3f86ab0d83d5f78a7347703b4423f30c15c1e4312f1375a -size 1347401 +oid sha256:64a1f3f7e33b695b896d8347b3407aea7c2ff06792eba520e2334defcec120a1 +size 1548752 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png index f749102f7fc46a0761337b23633cd24063b89bd1..6049f227c66fbb31f546d9f9a03d6ced0be6d935 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b533ea0a833c75fa8baa8310582c58d5e315fe043e7c29a65373ae95751bc7a -size 759419 +oid sha256:7bece25a9f25febd34b22e49b7fea951fd74c47625d37b8c1ef61f468d76fac5 +size 949393 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png index 82b63ed57c481dcffe3e0cf65376f4c723da6680..e10001eeffddcff2efb288f18bd9a19095005632 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2d550c39bfedd526ea4f31c72c04c38fb2a536225bb529426fd7da4be06770d -size 761243 +oid sha256:c34c3ffd58b5a9484da6e33e45f1e606bb03e802deee8ff8528d3c411e23005d +size 1037874 diff --git 
a/images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png index 80ae749f1ce87f261745f0f3e45d71ad4cb931fc..ce04addc779358fce1b8aeae98617ad88b40fcf2 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:031958d3e372718f4263b369415d81f63f21c4e178c000a566df2ffe54d51c87 -size 1313162 +oid sha256:81d3ddd89bb5b843f16c2414dbd7f35b041cb332d31b4209ce99471ad16a6cd0 +size 1106425 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png index 2be893a3fc02ccfbc7031242bad746107c48eade..0a5106b3a49e2e858a9fa78bb0abc2c6e7623573 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b12f072769742962da8ec74fb5bc3bcd1e76a8aa431bcc946bfbeb0a5b3f33f2 -size 1296759 +oid sha256:a701df1c97aaaf59b8f3c89076c12527eeaeb3756f0a9dd2adfe9fefe0c80f97 +size 963558 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png index 3f61506bc01189a2a80c327a99322d9cd0a28f53..66297934f166a06146481e5eae2c6df16265c199 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:392b71f9a3d653b68254eef3d68b246873ff0f9c1efd8c2f8660f39f5c708fbf -size 1350983 +oid sha256:4209167a2294fdc07fb07dc01b8ede492afdd2cc350c125761e5498521606ee2 +size 1551120 diff --git a/images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png b/images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png index 2d1348d7d65e3d034551bc2ec0f68e8c15abec2c..aa5ababa13cb526fb0d00b9be23473b06c0425f2 100644 --- a/images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png +++ b/images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04425a785309669b1f39cb3720a197e8e301dd60a02ad824986bc0fd6b657eb0 -size 1313824 +oid sha256:3f6d0a3aa27a27827cf2269fcf9b094a2507511231c6e1cfa9234f3e7f67e31f +size 1014108 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png index ffc80a9c2a7e9a9beb3367bf1744fb01d5d13d34..0e0e322461b9762bd98310171f75e6d30b0d5047 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f493dc0fc67fbe7a15fa9613d7e0349ee2fc15363f2f3359063d1cbe5e050e83 -size 1628139 +oid sha256:f6c192f784e09a801d78f714e27595840378a07bd27f6ff5c32fa219d49ecd1e +size 854922 diff --git 
a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png index c39b49f36f57cf029ade44387ab5d2b08a299f79..132f3cf54c27a59b4f88ab18b48dc860e68a4a48 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db24f1648c49289e1b71a9d8498176ba90c03e41b77d06ec3a027d789f80b84d -size 441844 +oid sha256:02b006545e0d7439dbe684612c493dbfba125c1d55399a53d48bbdab0cf45bd5 +size 582353 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png index 2bf9e323a48022c5fbb790c383e2be11a2c17a17..90af67fff86102ccc876276874b36d2b9307eb5a 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f3311f0728889cf4c078c0979bf850f4b58c2b8fac563f320d5629f206f3f34 -size 486607 +oid sha256:365dc6d15346c7d82c6c4c60558ecfdf64e9366561674f3a2113e153e434e8a3 +size 825599 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png index 4d34c25a030e0507be9557107475733c3a2771c5..5592765c8f95763932bfbe0f31f385b20a12b1ae 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03a1a74379e862432c3a275d6ecdefd207d25bedf5f4172bbe282aa5248e7978 -size 451880 +oid sha256:701d139df2a5c21f8e6d37017ecfeeb880949a44563a67acc87bba760c57aeb8 +size 609362 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png index 3570089738dd3b0ea894a0e717e3233ffa36870c..12583e1adf4ddee626ea5a8e47300d791b0f3008 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19352320cb8d033fb460bb652107b7c0546173baf28654510d1cd96969cfe480 -size 1625583 +oid sha256:8a828dbadd0edfe58e4d7c5b3af53c53e758a15ccb2f768ffb1305cae7908a1f +size 1356805 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png index 289442ff425046158366b7413378625478791c6a..0bfed6648a7ca5647fa23a2b4cc8273384ecb89e 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00ab4206810b74ae7f356d19ac2e4dc67836c11e1d046ed0a6a7f118f8fb3adb -size 1585573 +oid sha256:7b10a06ca898c147d62032e1ed5e05ca5808b62d898857144fa693c0d1ca7ebc +size 1490004 diff --git 
a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png index 4e4e8827e17b6c526c38c95629478490b224a56c..1aa497a96f27f596e98d081741f5071d59c9bff7 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7cf3eb4c636290c731d675b23c64591e47bdbe80de1d5f71e7bb325bf24a6444 -size 1256393 +oid sha256:feb8dcdd49e507d81093cdbeeccf7cb1b3054ae3427abe20740db08960dd4022 +size 1304973 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png index ce3fc4dc013f6c3bb8b0877371a2b4e379bf0b3a..7852635e74b75fefb487c5f362b9e93ab21d38e5 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6889efa53858b2999529bfcc949bfc53b9ac219913d869411837dcf82a503318 -size 1582925 +oid sha256:7b7e362d61a41d702bba00a37c01c61612329d4c2ad1d946c3c8c92bbecbd794 +size 1142530 diff --git a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png index ce3fc4dc013f6c3bb8b0877371a2b4e379bf0b3a..1a8ae3b2ba41415b9dd734f33b66457d32961c52 100644 --- a/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png +++ b/images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6889efa53858b2999529bfcc949bfc53b9ac219913d869411837dcf82a503318 -size 1582925 +oid sha256:e37ee77a8079b278cc6ecb48bfcf0b0945fa7696cac0cae0dac338e1a4ea986b +size 940628 diff --git a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png index 2f7d0fcdb8106b29a1b7cba6f10e42d6db3a881a..f120fccc9a0a84071e47ccce87150b37375b2ed5 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46cde1f58690fcef5a5dafa98db42626a04256ae56db64efc4e77fb847468f29 -size 3383389 +oid sha256:195f08effa5ecb597e8c23f4ce95a5aa8b915781402b779c719b1087f53f2a76 +size 2387794 diff --git a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png index 8818ec79cb9a0165a72b84fdd1e257b3630adcdf..18c28d783b105c0d7805d2ec333d398bd4990f3d 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2de5e8265318862e1eef15254f3eee8ec7a5dbc424b953b3b53168e6c04b6dd -size 928009 +oid sha256:43f9484032ea23e77706f14d79d88b0a302e564f5ebc787cf010783afd37e5ba +size 1165469 diff --git 
a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png index 9d23f69cea4dede539f0d97acc40df36b3fb6ba9..e0fac433f2125274844a129a4b3690ccc473e839 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a020be42c519091b80f0ba226efc2d32f1891f9debf7b2297cc11ec695a86c51 -size 3515956 +oid sha256:079e4c4e34a2909aac1634877c34fe7c9c3ed55e6a837782e908523bbd7a31c0 +size 2022598 diff --git a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png index 9278b041e5a9e8582cd7774e960f8b05a479d5ae..b82943aae683b5899c180339ac847694f9645a08 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:221add461161ef577405186fb973daeb2b4d760bacc3a6aba7f996d353786099 -size 987256 +oid sha256:b917314c1c353665b915355f2f5385d502014ed292ee641f2ae5d62a4b5558a2 +size 1016202 diff --git a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png index 7610c7f6d2e2bea27347907106bf3609d4f234c4..d7f2fd887bf2f82f26b5e9655d811e401693897f 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c368e608566c1c64c2cf6d820d4015a4c0de5f6308797edcb5a8bb6b00126839 -size 989011 +oid sha256:7b3c93f4724f09019ff3204300939ef0b7c1deeb411e05f50341b518846b0e80 +size 1193009 diff --git a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png index 6f15055590ddddbaeb6389db0b0a78f8a13b1b0c..88e80463c6d0945ca2ef77f676531121d860561c 100644 --- a/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png +++ b/images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3c223738749fc78ef9c3e0156f41e120c3d3d7d3f1cc7e040fae012e48b97e0 -size 916709 +oid sha256:5bc04b36e26553ee4e98cfad619865b8d93c74696f0eff8bc2736114bd024c3e +size 737455 diff --git a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png index ab4b37083230cb89c7ff68aed0284331bf9c2809..3ef63a0d0a7d273c79986ce88a6deb2ad085d267 100644 --- a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png +++ b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c718d334945ff39d781263af1d80b138c67961592035f6beba10f8e2832a1af1 -size 512777 +oid sha256:e18513155f125ee9e99be5264a754e4d1bb5e2a3820ebdb4cd3643b9516b3ded +size 558127 diff --git 
a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png index 6fa06f49762497fd50c1e8532e25efdca26de9b6..4f99e9422ce82a4ce532eceee8730455f06e46e6 100644 --- a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png +++ b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08791f79db3a73f43f95a8c8510d8ce8ac7dd4783c0dd91f12150d9066e0972f -size 1881848 +oid sha256:62a4194381ea006a1b2805886b0dc2a5a11bced6c7ed61df34a232c7cdd4f5bf +size 1252344 diff --git a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png index d0af42d9c8aa4f7ccc99986e1b514845859449b8..07105e422c86198e0484a51f9cbb1a71e6fbaf97 100644 --- a/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png +++ b/images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5de09a8af4ad1e02a11105689061565dfdb86647f777ff43d09046d8463211a9 -size 1284828 +oid sha256:b0d3dcd4aa42dce672d1f69751f87c22e2022f7b29fb9c3b2b7152e0c30b5429 +size 1184766 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png b/images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png index d2398f294bf96504ce5243957b024ebb4db6894f..8f7c13ffb02ce525c47d3db59d5143439fadfa09 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86de3f7a04af30d2771f0e10bbfa8c923be06b00c3f08aa092d3c68e5d22fbfb -size 859199 +oid sha256:d9353b0368da304109c79e15b815d3669da85dc657b9ec50c09aaa0d5e79ba1d +size 638426 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png b/images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png index d906da1a5d487d926416ee2a5f3780e762bdb0a1..d5ea2d5b2cd378905460a58891ea7ad3ff9fabdd 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98a01a83b78262077fcc6c72af36cc543943f5aa35cddcec315ed7c3b3b7a511 -size 742713 +oid sha256:1e1c826d45272a7f0ec94dfd08e5f49c678bdb5b5ff227f57220e9cfbb314db4 +size 516406 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png b/images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png index f61a00c5a7fc695521cf17a096d0a61446098bf3..4f03f0cc31b221c50388843397ec73bd9e5f3277 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdf5819ca5c6df4bf7e72c74bc43b3c21a22e963219add42bf75d69634722563 -size 706613 +oid sha256:f6bd86ac0ed50b0691d0147d12039ebc91bab76708bdb90b515c47be5b335a1b +size 553678 diff --git 
a/images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png b/images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png index 379df82d3049cbb5c9352dfb9c3d5a7c2c2163df..98875732d200e225a4d79957ad9bc4a6ef263b1a 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a22e6689d1aad2dec390028b055ea9c4f3fa8769142f07b33b5a8349e82c6837 -size 678306 +oid sha256:c4cdf15c63b88c634f7d8560a2ef76ebf62ed5e6d5b477dab42b1f88dcfb5f35 +size 668980 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png b/images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png index 70e0f899368c0f83340b26ad4796e929cba9ff94..50d236b043f0fd34991574fe044bc462d7dff31b 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85fc899f9272c39b3c078ddb77c80aeae9a960391d68b32e29198687580c2284 -size 711562 +oid sha256:47724d8086150aebf0d717ba77e9390f6a63fa0da8ef728550e3b76113ef08a7 +size 485785 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png b/images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png index 916dc5cffa9ffb97e3b2034803596fb9159b6bdf..4eba2ef4a135131ac56b78f336d3550422502e28 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3d18d3ce501fcdac28c311cc02f5175847117e75202302dfe8c211f4ffd78c6 -size 675303 +oid sha256:66af8383f7477a3d6b754f9a4d8da2eb90d682b883634027829e53831cabffcf +size 642640 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png b/images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png index 0abffda770a69c1945e617b6e4c9c9e49f515b3d..4ba14554d9b7df99b99a8b5da0c3cbd8bc511251 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed2453762533f7c4f37e007b18e8b2956c4590114b74cdbe83f031e2698455fe -size 637035 +oid sha256:d63f9a77eed3094f3e32ec69294c7d4b38b1c08c6c34d30d268c8ca93e08ffc2 +size 299547 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png b/images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png index b4b459576b557fd78b5e9c3012b252d96a3341e3..65c748e0e5c86cef810610faedfdb5ca7589eb53 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d560631681e21fbe27eae1f82914ed5d3eaf21dd32c97db341894d0c70343c76 -size 636019 +oid sha256:eeb4d643bc4e247cf9276b29b2af624ce8f2609a269c91ddcb48baca6cd5a195 +size 572655 diff --git 
a/images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png b/images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png index 97144f9e02bffc5b9e85293eb83830f4c0647474..d9768dc9f39eea7f36091aea9ae162a2bc8840d1 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9219882840f2262da734801faab143b5eae4c799f976e40bd6f7be7f0b6ea82 -size 748711 +oid sha256:210189a1892b6e339169f3b1e7f4f1301e01a225058255577c1d5ef7ae583f0c +size 689804 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png b/images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png index 41a588f63f1e74703617f9854eab0dd6312b63e4..8380369de6ca3e2f06d90fa2a005e01f88761aa2 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0088f60bdc9e1ac598446feb18373ea2072796753dd06132976ccdeca91a9591 -size 671731 +oid sha256:091c3c1b781a6eec13208df0a7a5f2e105867217b85abcc29b1758b1f45b5702 +size 768131 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png b/images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png index 154ccb1e769f493f5ecb354dc872fa825ff93282..338e193079b90b103acc846121fa626cea24007c 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4465548766a17785ec81ae8477a334936e64319d9173be8f228a63c51196ef4d -size 664872 +oid sha256:df72898086f56b0e197af05217cd7f8a4b994dec04decb4d691e73d5759c9e50 +size 498988 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png b/images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png index 0cc1402c0631ec0e813fb4a2d9e5639cb806340f..0349ea95588b895fbd3d3e84790599a7b221de8a 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba7596e9fb429c16513b3cb54a7a1f08f2ffbd9d9160392b4e3749807089ba04 -size 636573 +oid sha256:dac9dbc16eb611a2565b113837d9181cb182900bd6e0b0d72fa08f4ad226ddac +size 520401 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png b/images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png index 6e3edb072475190a6000806e3f482991046e1376..9ef5c49b1685a0fafd53a36f5d211dbaddb49374 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e9b72db58f1da0b2fb7cfbd6fbcef96d319bb8d70930c051c56b8e778cb5383 -size 899215 +oid sha256:ca3c455f03f2391e18b957e0f7d8239b50552963729f7e3b62b4bbefb4016629 +size 1277733 diff --git 
a/images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png b/images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png index 28775e1effa2b40c7c45f8c26a9d3237036d846c..be778f51857d2381c05cd30cad5a0c2622b73fd0 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0998fe3f0c3da1ec0e19ee06356fbf3791eccd760134f133a1944b21bac91eb0 -size 703026 +oid sha256:50ffee4bd6ed3a923ba6f3b05e269b05c6a630ba95e1394a40f5c8f21a256e2f +size 663433 diff --git a/images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png b/images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png index 210122a3ddf97662f35f0c8a7fd35f4e81560f3f..1a53765fc093439ac9b8c6b06fafb86e2a88af0b 100644 --- a/images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png +++ b/images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d91c6878829e3b1454f2fec5e06be06060306ac4e4a5e8bb0ad11d7886c7c57 -size 750335 +oid sha256:968511e4ecaa93e2617fb254aad9249692871474267ecf6c8bfde2fdf6782c89 +size 375812 diff --git a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png index befc61b9b12d905de39c7cbf5772eca009205a18..e3d6de0237ae4897751bca82352a8643649202a6 100644 --- a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png +++ b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ba4e7f98544254e4202a00fe8f354b3ca937efcb3f537c89fe772f5a94f9ca3 -size 1510103 +oid sha256:3ed80ff995621934a012719a4efa3d09d1915c1bceb4f38a358bb44baba60d34 +size 1835386 diff --git a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png index 5a13062ad6b01eec9f868e8350aa104e5b7573f3..8d738697bb8afe7ec5a3cc4bbf9c95433b7faedd 100644 --- a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png +++ b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:889c7cb9f27ced0463e733e57ef5f84543f0ba0f8285fc9e390cc261e0624179 -size 1754902 +oid sha256:2498d7af8a0e9d5ec96a47d8650ed8f09c3b385aee479d52fd58206ca721524c +size 2541621 diff --git a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png index b6e119b028bfe9175e8b080334c2dd96e0c166e5..ef2251fa0bb2595ec7cdd4271cf24fa233f1e446 100644 --- a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png +++ b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf1cb0fdd5e9a424400a4f1a8b67426e329039c5785e2dd313f5ff4f66566332 -size 1865430 +oid sha256:02e03a3997da1ad3babb5420ed5ce72906f913e1deb7dbb363abe537fe7859f8 +size 2213262 diff --git 
a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png index 6c659fcdd3917ebd72e34edf499d22c40ed542d5..000b280159671ac14b358d056dfb1cd9c0056462 100644 --- a/images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png +++ b/images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93159bb10d4742539baf29f7b501c86015d090c016b7f559421046d1f0c0f4b0 -size 1453640 +oid sha256:de9057b9b92e982fbb7d8a2336237e96345d25d1b1af8bce0034a905a5d2b08a +size 1806911 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png index c0b286dc69a50804eb093d1beb6d18c0f9cc82a7..89ea3b60c822172d01de660b021761c3b1fa3f1f 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4c4d05c08254a07979053e685e1cd7e9e72628cb6b05e9a68ed36338864b175 -size 1212106 +oid sha256:9a9c38aa8186826f93a66bc930c227b3493cb6d369369407393492c8954beeb0 +size 1653819 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png index 3b947d618ecb1820868b928466543b2f4cd59a7e..139fbb9a2a43131cbf718c601d590cb80c1c10da 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac0e6e4fb24f09e669bf21d5c27c7c5655e9e915e4faed379047f5f536ce0b84 -size 1445467 +oid sha256:d279d935d51579922ae44542e560200aa8c9eea00916d0dd845d1ca05d6067fa +size 1918086 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png index 600f90c08802ff71b145f7ab9fc76b1d9cc047ef..4a8f03e02fcaa4102aa80d998d06d4a978fbc098 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bfd689151826749cd71f6be90c95ae0c8df652053a0eb6470bb5ba83a4bf85e -size 1160506 +oid sha256:3b2e746c6e7d771904c184e01c53689943c6f2f064106446cf91a544e97b2ee3 +size 1649067 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png index 881a7d3962f1697f9020c4218e2c8ade70a97b53..d9e1eeec0fdc8638e8c4767b0b25c24186c46862 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a973da9c7d0a045268385c60b2929c566cbf0ce7c98fe53bfd84af2c7108975 -size 1556929 +oid sha256:31959c1c243a727d9996bcccb213a5a7418f13002379ff36a13930ed3c2a94da +size 1723199 diff --git 
a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png index be2dc18772fde659b24518ca99a71eeb4f7618c0..ede28b3febc3f15e59aea570048d8b2d556806dd 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db1cd04281bb68cfd1b9c26c96f6bf0ca2fcb7d55a817511173515d2f25efd0c -size 1161135 +oid sha256:12b858c151583cbfcc98708b9c893ece73ccb36ee5c4c197cc84d0c168f4152b +size 890037 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png index fca8a0d2896046dbf0eac2275734b24b075ec7ef..faf20873ae954b4b4321c562e601d181ec1c1b26 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11883e78e970fe5a563a15c14f2db84cb10da4daaee3ac2c16dfbda81e5bed49 -size 2044275 +oid sha256:80d164d95d9dfd10273e89f281fb8af972074e28e01edc1bad333cc1aded68f5 +size 1938051 diff --git a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png index 191d9844d030521245d45ef6921f9b8faa07d923..4b495ce7e250296d5c8ebd02d1d4df8992cdd27d 100644 --- a/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png +++ b/images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1da2e8a9557cf77680150073969ac4b789e4dd7ce1a37c742b416589541af490 -size 1329674 +oid sha256:4cf18830bf6b5848fb575fbee64583067b13125e14a3a4e4fec6812f05b68509 +size 1356480 diff --git a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png index 09e44799f7027edf50103ae8ef2b02557b7ea2b5..096dc0f141a0467655e7179f7bc0ee1c595aad81 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3c671324f70b457d28eeeffdf21a9e2822c48321908e3e3410725dea92f87a8 -size 352315 +oid sha256:314dbd5d4b9dcb230686b24a3bbfaf209689653d52a59c2fa580ef55fe641ca5 +size 347327 diff --git a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png index 6d056c916298fb05bdc825ab8766a19087a9de2d..085434aaa159c9a90a43da9bfa9970b1c0de76c1 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f776086c537f2b1236333eafc9ddcfab15501d673d8c87201ac3db657f1f640 -size 528146 +oid sha256:dfbc0166baf06aac49fc1002064a63dcf52dc0e48a1ea03a329fc782437cc217 +size 1536626 diff --git 
a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png index ed4f7fe4cb1193c99b86736b6a2d037ceb50e130..bc77c06bb8148717827af41523ff542e5a3a511b 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71157fc64a13fcc7bd56e8ff558e10bda1e9662a806ce4a09c7d01b6c2f48604 -size 325102 +oid sha256:c3f07d67104dd4683f8027867ce02c0747e369227f8feddb33f9d7b440003bc1 +size 616659 diff --git a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png index 365390bf340ef8db7e452c61d27fd252a493abe7..6ce52080cdcfc75620c93c167a4334efe94b3d75 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3684c40fe91f923bc2ede72d7604efc395a3e2554ccc366ab9b27e12d7ea43e -size 405072 +oid sha256:526148437db617a9cdea5a7ee72349e1f433110b3e6f717cda9bc84f8022a594 +size 393447 diff --git a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png index 02304bf3b095d8bcb1e8c5d480dd6a02290ecbf5..37cb19b5f33b81d31bc14f5e6bfd2bba31b52b02 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60aca9926d6d52443d3a6a3895efbe4de2489fad15cd3035fc983e6c27a74409 -size 343500 +oid sha256:c5c7a3b9adae2f6c5c7642170324215e06a0f14d5ed963ac2534845d5772c6e2 +size 1803072 diff --git a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png index 65a4c405009cf15171eae1fca078fe9ddf7909d7..44a371d4b367d195beab9f53bee4dbbeda9f253b 100644 --- a/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png +++ b/images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a090caf25d020dde29438d0e6057059f0e7e4db8927cf4a1f63d27e481b8476 -size 318187 +oid sha256:873267e2383c83cf337d75edafb80a8cfaa04a6e935afc3531af20bd578ce945 +size 1115669 diff --git a/images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png b/images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png index 468a8b2bf574a011e40b88c4627a623470a7e4e4..88d1687fe1bbae594e48c1777bc056cbbe8cb0e7 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b32e1d476ded2a94748f85c59cd26178c774d8d752fd1df2b0cd9028ed967c5b -size 890521 +oid sha256:9846b147449c41ae165aceea7d1b7f65c715a296222f9f2e4d5113e1c7326a7e +size 1006494 diff --git 
a/images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png b/images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png index b8daea165fa106b00a3271758dc52d1d2aa78528..3c4469927b62950a5212aee1e70600863bbf0381 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a6b901f4435669e1e9b604149080c656daa333099dde9049c71d01859a37541 -size 965621 +oid sha256:0f2767e98ce1ea7bd6c3ef3c07cae5f10fd7961efbcfcf684af1121c6832466e +size 741469 diff --git a/images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png b/images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png index fed4f360e729c35ff99dcac2572dd9e1aea4900d..63dd99806a0edd0f83887c29f2965546a3e63f9a 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4de054e4291525d1df1be9ca5a62b2a2ccea24e10de56e6f0226dccf6d97eab0 -size 317262 +oid sha256:866cd0da023278360748714b2cf714a4be8051a83fb12073a423f8c96b0a15ba +size 313542 diff --git a/images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png b/images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png index 90979c873c665aace5f12e55c7d9c6b66fb9980b..5699994366de6faae8d291ef4647e4d3d7e958f1 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18da1105aa728e1b6a2af8391d2d210e682984a0a8259e92b49e18fa2abceb93 -size 338751 +oid sha256:34856296f5ee74d3ab0b750198e8ec2700cd41fdf6426ca9002e30c035d352d1 +size 283062 diff --git a/images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png b/images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png index 5c4edc3a89dd5d6549103193ab6bb4ec756a4642..fd7b9cf8cfdd2306c1f301b22a0be7bbf1e0dfb4 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f10087c10feeb69a3cc7529c4b5273aededdff56f44ae527ee1242a0d4feb811 -size 862661 +oid sha256:0f70b99695a5b11e34af2aa0b80734190871a834977bc87e4a6c131b2d4d97c1 +size 1021582 diff --git a/images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png b/images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png index 4de8f80804760704d24fd67ee67a2078ef87c3c4..ea4668b9d6b2928e6ff29361e233e4551cc205f2 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a8fcd1a777e1a6b8f92408ad621a72bea9f30ae7bbd45effdd8ab3026867d95 -size 881884 +oid sha256:e2a14ff4b95509230ab0e09c1c78cbb183ab784aaf00edc7d5c1dd75de9b8465 +size 788234 diff --git 
a/images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png b/images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png index 961c4c892cb48e868a33a2c7fcdb7be57b8973d6..29cba0682a41e06c18fdf06d4df0373b1567eab0 100644 --- a/images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png +++ b/images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90efdc9368b51bac4067291a0a8f77e5345b3e93bfa623f0c95df6839d7fa295 -size 819243 +oid sha256:4616500eba88960e80da263f505b9410b2fd42c78f5928b6d43bf81d61541948 +size 1049150 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png index 1c169496207861b08d40533bc5b1d24f0584207a..f2da4b14d3f449b336710b34d22749223d893065 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:418c437418e7eed746f24c7eb575a90415875a9181fbcf48fc3bf8edfa01de25 -size 826535 +oid sha256:a51c53874bc0af0accf8ec55844d3ada724baa6f5f48e97444e6fc8da21f5c55 +size 1051723 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png index 6913ae64b245f19b4197c9173ddbcc438001cd73..d7682a5d139cf216023a26dcacfc7940f6ea15b1 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc81480a772b032b3ce87e876d6d04759f2a1a8ff6fefac2cf6e890a02de6a2d -size 627387 +oid sha256:c5a3c46df410d13319c34ac95b8a313350639ac981fa943bc83d1b03de3f3c61 +size 560353 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png index 1ebdcd42742c52b8004fb43001159c60b68bdfb1..7d57b168373716fd8ea0a122090cf9811565f46b 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c41e18a759c8933d933a9cdcc89aa0ffc2cd33477da5aca44d13dab6ede66a8e -size 808167 +oid sha256:db327c27508c83d690c402dd155dddc6d9d1c5715ab84dc9fefed7699296bf61 +size 1047635 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png index 98ce250fdd2b8266781c8c9477419c1ec16d3c77..8c8c887d86645c3a2b4d3ca532bd6ca04605e388 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b126bc6d9cae87ff3b04f3943c7eaf345367102eddcb2beacd4db37123e30c4 -size 830745 +oid sha256:2f4d52a6aeb60784eda28b09654677b20b257bf395d911ce5d3d811dad089625 +size 974538 diff --git 
a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png index b6f700a5f17c95e4b2c5339b1a2103070cd21b32..8c47ce3ad812436b0d29c30b2f5542c9b341a7fb 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5f806deb092038dc413e05802eaad1b225dc1db3336a0fc5911bddd49479980 -size 1140621 +oid sha256:48f6db0466367b8df8e26373622ede3534b6b093beda441713c6dca3e9e2d15a +size 673987 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png index 33849ba9b637a5fe78f26c30d4363282f674cf8a..33544c92c6075d47569631cc087c68e2344e2e45 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94a4c6f656a6c8faba51e087fab3fa2485c94d971d4b25c7ed052fa4961a5957 -size 970828 +oid sha256:56f7014cdfa40f6c08aa0e355509034e517fb2eb0993d4e392d2ac44867f025c +size 782765 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png index 630083036e46ed13756cb86e0e54d52a6909649a..2000d6fbe0da9716d96f48ac290f840d06c013a5 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56bd162b1f7058c134ba79a127ba859f79d3ab4f15ff13ba1c4c7709f2ddc427 -size 704691 +oid sha256:30df3e120bda5d8d334b04e327cc902d06edf3822fb438e80eed205b8d712ae0 +size 803295 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png index b097349c2eaa68b23bf056c8762eafa1a9a95330..1c9e64af35397f0aa371955b571effb652960164 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aee202a2f563ac361df75d1b38e7518dc910381554862000c3524c73ceaab8b5 -size 1277282 +oid sha256:79c35132d9c156d4ecf2fa6ba7c4b40a8e54a3fe02056acbf4931fbfcf920e79 +size 965874 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png index aef70fe8dd1e979948cac578eae03e703811b372..21fdd4fadc44935961ceb5620a0473c3329a5b9f 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:543c50dfde4ad15db22e1a9f024da5029a1f67996374071f2135d1fe33806d4a -size 837037 +oid sha256:90c5b9308dfcab956d71135b090d2bba2e2150b7d5ff18d3d615f0add40c8d8f +size 1022215 diff --git 
a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png index 20d19ae98aac9b47e9fb39a61da5f85019d6f2ba..5939dee3cd52f4d0bd0b374a73289db9bc217fb0 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20c96e81618d363e9e2ca4bfb0ad77941146867e87234d9d141c41a73052994b -size 1295096 +oid sha256:558eb324e00ece5e407f24b1126c3846c6a9afa1a6f1e461effff128116ea00a +size 747573 diff --git a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png index 6174d17e7c89c383a52dddf19baafafd65da39c6..002d7547f55da4e5cd8dd6c3b2f58c24fd9e2922 100644 --- a/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png +++ b/images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd05b475ec107d3a1d300fc75dc229c3a27a32c22bf812230451a6a52fba6b82 -size 1586446 +oid sha256:76846b5ac2a6915f62448ceef4e1d95dce9eebdd8f9afbabec96fe57161cfeaf +size 2073811 diff --git a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png index 1f0ef43d2ff9cdec90155f40e591c9d0305ca624..e2983840416bb5784b8e28d0a752788daef8b5b9 100644 --- a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png +++ b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93dfb3afcb4e7c3ba0ef0c90496b85e7f055608857cc04d21134cf88beb6e4e5 -size 1576075 +oid sha256:1be9e06c1243cc347824f2c66cfb99de4ec79eb5f59b7631920f378e10125e36 +size 306100 diff --git a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png index f6015d7e5ac80029a1da5325b4d29ab3af4579b6..03f7df1ee5c7492b053cf96fd2f23619f9843400 100644 --- a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png +++ b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a84a1e560e134ce86700cfc76e97fe81b1f5df82a3fc4ebcde7b5400b0dc5a9 -size 944117 +oid sha256:3c9a467b1ca6d400f0b9eed8ca1d1e8b8b67ff680d036e212fb692b9c714d390 +size 783462 diff --git a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png index 80136423ffa6d6a2ee1a79c76a0421b9572936c6..62af20739e4943ff723dec361be915e24368951f 100644 --- a/images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png +++ b/images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b6658db2ece9aaaaf19993bf17398ab6eb9dad0a043cf7aa5f1d1a02933d50c -size 1020981 +oid sha256:da8dee07fc6609a02dcdfd7c62a5d150c60590be8e4a02f0a3000fa5e8a85ff7 +size 341186 diff --git 
a/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png b/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png index d3ec100ef095e02e35446f0158c24ec4977aa2df..f4ba31cc4ff935d01d51d3e83145eaf011de1dd4 100644 --- a/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png +++ b/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91773009fcbc57939d303ad2d0dbf218040fe1c629d7fc5e8f8ccf768f3c941e -size 821563 +oid sha256:3994883fed304f88df7a56ca1caf8c605fd012db43e1d2a3430a0c9316cf92da +size 998474 diff --git a/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png b/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png index ef92fdf993096b824868524548d8e485f1fda38f..71a522718df1a1cf77634d9e600c33a3543d8759 100644 --- a/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png +++ b/images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8824fbb4fe7376f062587094030b3f54ad8b0c255c39c58dfd0ca534ae37b734 -size 632935 +oid sha256:31cce69dd5ae73a346f03198ed2c6fb839a132f89228ed8ee065c6a06b0490eb +size 594948 diff --git a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png index d13f766ba41c178ce5dd8e9d4e5e41e8acc1c33e..c51f406017f0f4eb71f11de05f4a3a77dab687ce 100644 --- a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png +++ b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:222be00062d79416de40f15b3c0f963f6725481e9a6f69ae9eb323a2806dce4c -size 975542 +oid sha256:5c08c75d0a57e6c97e6209b6d7579fc165029facd5143b59a0f69b4f027bbd70 +size 465068 diff --git a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png index bfd44fc8ae083f2aa93bbc1044586510557a7499..552b299a6b53a591d36c666b1ef35aa542e6f7d3 100644 --- a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png +++ b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fb8a3b29543f348ad28fddb50415fa8dc9c5ff6df8e5cf81ea993269efeab36 -size 2893086 +oid sha256:b15ca1fd16a79fc2fe63a6280b35e360e79f2b525414f7cfce84fbfaf58cd4a0 +size 1695546 diff --git a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png index 6998acf2dc5e8c71f97f2c926947e92b4c77ef12..b4ecc1380102baeda77ac144a8c6f562914ccf29 100644 --- a/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png +++ b/images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1eddef2dff7a634da4fb9dabc4fd9f1a5218c8475261c869c4e883f792f121e -size 981847 +oid sha256:7092cef8404375ebd93ee70199f9532499b226bbc6c9465344cd9766c6244f3b +size 962864 diff --git 
a/images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png b/images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png index a3e9c4a116c67ca760cb17b66cfaeb63dc653b98..e3857f6173709af596cc179bbe9220258befe69a 100644 --- a/images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png +++ b/images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d534290bb2d26b34dcd0653ae10cd7e2f546cc8582a551db4f6afcd2eac123b -size 1315391 +oid sha256:7c198385e9d8d40d80ec7c27bf5bf46efcfcc552284c90d0c9367097336d24bf +size 2143398 diff --git a/images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png b/images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png index 766d65e1609ebeaca1fd8cfabf9d50d7c987da1c..d52c3b36881bc8c6c928625b952a970343eefb55 100644 --- a/images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png +++ b/images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67b90fdf24a6f77011a76ce7a31e85a070d0ab68aa1a59ee3cbb99f4f64cc079 -size 1713231 +oid sha256:533bc172058a983a3bb680accdd85abc5cf66ddc93ca5c5ee16202ceac3cd685 +size 1848510 diff --git a/images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png b/images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png index d29428c63601c70a821b30b069bc457ff85e8bee..1d9f291134ebefbc4474ef9063f57240ddd9b3bf 100644 --- a/images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png +++ b/images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:277d17cec71cceb16ee11b4ef29d6a3c6ae1fd807ba5ce65d5fe5f8bfc61d847 -size 881581 +oid sha256:577cfb8fa0a851ceef09a5c95202729c38f1f6f9da51396f92fb79a8bd7399ff +size 911428 diff --git a/images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png b/images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png index 177f703535cfe4ecd083dfc16f97742ea09d6f52..e540a9a1d6047085f9264dbaf5ef20d8ce14e796 100644 --- a/images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png +++ b/images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a23cd8ad505d80714fb7829b588463ca301d39982e00e89aca53eed70ab14fc -size 751128 +oid sha256:3933e64e72bfe4fc39af752ac5a534dab96152ae1f1fe3ad6e691c428a8364c1 +size 1381254 diff --git a/images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png b/images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png index c43bb5f57e723769a9a8582dda837dbe1980d329..39af4cdd08db396214f85c67ca9a824757dc3bb7 100644 --- a/images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png +++ b/images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e19b0c99fea57a4d1c4ba70e294373b8773cbde12f683f56b9de1633075a8cfb -size 1929475 +oid sha256:88adf594f0351011df1ab232d963524852eb1de37df8b29d7de8bc5ac5e88b33 +size 1937125 diff --git 
a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png index 34573747e944adf8f0f3f146a5ab146d3d5c5d3c..0fc3e2be39daa7da793c8548684167e9238e37d2 100644 --- a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png +++ b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91401db1784ea7e9fbaadbbf56b4652fb118b674e741bc0b42f48970aa824c5c -size 1394426 +oid sha256:f2294602bc347722de13d0f812032ddef7308e045b3239b13b27a50e7905fe94 +size 2617018 diff --git a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png index 235b9cd9c5766cf7835ecb4e03947106abbb0e27..09f105f51399ffc1ed5052ab710530b0a70e2b75 100644 --- a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png +++ b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:706fa8e5fafefeb547c581fe498445286bf39e5e94bf9e939427e4d992e20c05 -size 1692289 +oid sha256:4267c4410f293d2393d2ae8de7c6377a88319bc30ebff89ae1c77b712aac74d2 +size 1351916 diff --git a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png index c148dba126d59762ee2fa5f348a1e94e61827f92..f4c90b83afdd3a77fcaa39b78d44c03c48f453b0 100644 --- a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png +++ b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc1397b160b52ae4865e39a9171321b575add3ec8489f209e6e5aa6832db4b50 -size 1688949 +oid sha256:145d18fac3f33b21900b28ae1569dd2d3abdbc95bd6247ba8f4b8a79372946e1 +size 1443012 diff --git a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png index 73121632b79e6ce7209473d578705fcf533e2c04..e2a1030389ecbdfa53e8db095a877fc5c1c4350d 100644 --- a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png +++ b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b95e6777cd79ce328917bd086a95940a88e9b867353c90362194a78ea926302f -size 1339540 +oid sha256:25330f8e7c0af1d3a668ff6f03f5a6a5331788bab5f55a59dc5ee83e662b3400 +size 2365036 diff --git a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png index 7e11f7cb83465ca7b6b63107eb7c3f97c37059c4..e20f39643289ce23864a5b22410582d3bf5c1940 100644 --- a/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png +++ b/images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a393e0b9c512b6b172b0e0d7cdb4c60d2618e8241440c5e100a0b55caebc243 -size 1361122 +oid sha256:6ee78be833dfbeb1c3d1f3e0341589a7af1dc6f987b101430c630e10beba774c +size 950820 diff --git 
a/images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png b/images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png index e28a6c380cd0296fd0e083381c998e250f1a9785..779f8d1e80c131da23417bc4237a49cb0a936405 100644 --- a/images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png +++ b/images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5762417b7ae5bfd873549e6a35c3ebf3607f12eb2626407b7a89213400f0e10b -size 580254 +oid sha256:85f62647ab914e9dda63d4e180b2b817a9cff2e7badfa563211a6ceed17ef141 +size 379246 diff --git a/images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png b/images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png index a69c6ca5665089fa35f7e16104a092562d888a9f..b3e359d61697be10159207fdf8e339ef1b5a79c4 100644 --- a/images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png +++ b/images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47ec63ee1182851f5b0ee4c8f73eab27385afc94e49c35f266597d1588304a68 -size 999771 +oid sha256:6a0d046c247bd6cbb456186e878027999a60d77a71dfaba152ff7d538c361727 +size 1234913 diff --git a/images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png b/images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png index a48a92931185357f0efaba2c9fa5f5f5e6c6008d..7bd03bce327a5f057d27b56ad6c7f4355c9307ca 100644 --- a/images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png +++ b/images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b839ec8f312f2ad8876286d0130b4291ed14e4f123d4d00c2b0ff12e8f35e94 -size 574875 +oid sha256:e4869d4c7181c66723f06090b81f6d974bab7a9418ca90622007710cb0d2ef46 +size 1114317 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png index a86c4e2a53ae5e6444cfad205f418b975d4c6535..1189f774e6dae6f47c156630b943153012ef7e4c 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21676cd225540f4653fa154fac2bee9f70b2f45a1cf6da86e38c2b3d4350a74a -size 936068 +oid sha256:a6195bc03bb4e9fb5159765f0c0ebce46def163bde103bd22e725c2f27ad30f0 +size 899322 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png index 617236bdda132697ee5a77792b625c67f9cc9af6..43b52e1f362d10213c69a9fbc1164c34ab7030d5 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:418eaa8c703b337dfd8fc29ede0eb018a6efe25c9eb6a5b23aebe15c4fbc3d90 -size 899889 +oid sha256:c8a515a47915644534259405d196309bcebc8a3d7a1ae006472ed4354ffba006 +size 921346 diff --git 
a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png index 65bf9fae5a96059ededc5ed8c5eb365e3495d1d3..aa821ee863db1a66170d275f0f972fe96e269795 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a79ea0ec55033f626f326b45a87d7a55b290785fa508b33e06b507a32ad87f65 -size 498331 +oid sha256:85146b9a8924e19ed564d25dacecfef51a5dfe76a784927fe76ce3267f976451 +size 504109 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png index 26722001a953ff5a0de3baf2f6b60d2427e5a97a..59c9a656ab620317878c35bca2119c1d06dfddd2 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d40bbc2e07d54a8b46d206a0f78d0e9d0dce323e9335273a9fc92966e1407992 -size 1145979 +oid sha256:6a2b9091202f34a4916c8b64bd4f06b06b2b67b1f43b0d6bd74c774e993992ad +size 1387869 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png index 3e7a1c4edcd0c586475b7afa6e881bdc503e2858..69d7c55db714d55bf7bb264e96f183efc79e6e64 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c14d78c7f8dbba23163d44b465530620ea6e5ee226a6707420d639e1125fa801 -size 874498 +oid sha256:4fd2b680e9c3e4719918c9fa3024804ef55bd4f3ed5a0da01b6e67518d00f9db +size 990862 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png index f884f7b411ad0c98018b1f55c4e3450b6c37fc6a..dec8ea1c5bd939f7a24ba303145fccc3d63c730e 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:650cec094570c8512bc0ee935cd30705fab5e143a5035ac7cd431b8e91a482c6 -size 1633513 +oid sha256:ac4e666baf30612150d5bf10197be8147fe83366a76cf91d4e1a5c9b19c12767 +size 1553517 diff --git a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png index 3b3faade5c2d8a5ba4895b9a783378b933efdb07..90121a023cc93dcb04383a81bf4538e480bf95c5 100644 --- a/images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png +++ b/images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:675e1291d4c924339eb37b55d43824b56a6216dadaa926d0ed18b840a0b93c50 -size 374901 +oid sha256:a37cddaeecf7295f684cf2ca92b4094835e9e9869472ba54485480e147dd073c +size 376832 diff --git 
a/images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png index cbfe7fb8a4f4adb08aea4ba04a297708b9d31f7a..968f36f27928ece233354fc7ecafd8e9b675d640 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae835d15abc8920ddd2d88d77e30a605927f0ae3c103646898671747996a193f -size 726282 +oid sha256:39ed240013fd1d6a00b353c9a5b65e4eb538130997bc2611114487e87a7d3e0f +size 906190 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png index ca591d798b51794d26de1099516b21c489209d72..b513c372210650a3ad4cbf8a4a8ef6d526ac37e1 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4259f9f392dcce1f9ee318ace004017159020243feba96e6ebec48b4ef32f8f -size 2044604 +oid sha256:0be7799c0c5affa683927dc9adf605146aced5f975105b7615ac1e4864ae920a +size 1692357 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png index bb73e52498c0d766b1fe088777b3460a534afea7..c4ee0fa28ed665b5f312e8563f6450aa006e9ac3 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58a65993e7529d3f14e5c177f694d8fe83b1dce2994eb62fc715dc793158cfdd -size 2215361 +oid sha256:427f67ea78a8120ea98ae574fed0a180441a5d09e98e47b9c80c42689d0832b2 +size 2006994 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png index d52d632182795bf3573c4309454233ada034e432..ce5aa9e4c08f9ee7fdfa0ade1610792e2ef97f8b 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6b6a79aace5754584027af13c92ea1d6e42a40f7357e28f53b0fd47bb5920f3 -size 772919 +oid sha256:43d13fed0a3f26225b3e65eed1f9d41f576b284d0b7a52d0b94464d071bd2abb +size 858030 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png index 50d9184ce7b142d8b957fd512e2c1ad19372ffa4..a9976331b2a9d3f2a80c831790d7cbee1a2639f9 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bb8b20dd45c49f1f55bd97a5fa506e6b5e2eb3cb256db9c35ac7a3c0a6dbfce -size 743135 +oid sha256:d0df6effc060fe3132f795ebd1b70341dff6661e875fa5240d4576112638cfdf +size 787078 diff --git 
a/images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png index e5465e4e41be0ee5d3aa4a62af567f6a848c8f99..b8db8366aad08ed94eb1cadaf0ce1bd0bdb4d0c2 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec9519a21a46fde6650dcc54e00459a55bccd8b551e3b59e72a4062a35156c8c -size 826446 +oid sha256:1a58c874a1f7cd425cb0dd8a16771ea8d4650bdefbc86f49362eb4aba1a7fc3a +size 749837 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png index 19f0d3ca271e94d4aeb7a8d22d54af13e65cecf1..85430a34b1a31e025291474998e11d03ca091466 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b919f57dfc565cb74446456c8caa85b0fbbcd4e3a2c13014a7eada27c188789 -size 3525773 +oid sha256:b01721ac17943077a8b7e08f5f323eb4691c906b5e42dea9e73a74fd026984e9 +size 701225 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png index a25d5b82a6d05d277a36516ab26d1b35c963ded5..7854c67a6e59cf819e98b4309b1f49f5b9531b0b 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03266b78b46d74a99f740546e1484804bfe1e2df3fc7e969e57474f843457383 -size 1902903 +oid sha256:1eef22616daae945e7519e6e88568526c16374992825231e4daf46fc7c8c6ceb +size 1331605 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png index dcac8e6cd5c29ae60ae47881fabf0ca30c7c059f..032b16c239ed828615ea123c3d9f3a7418bbb7f6 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ccaf8f65b0f44a0ee0bedd7a65b0cadb357092cf8942ee69b69237fd367db72 -size 1977587 +oid sha256:e3613b2ba74e54c1b583242cb6f2e2ad06305a8ca95dcecd7c6fbf2e01f50011 +size 1938371 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png index e788986ff004f1086f53e1ca40dac1acddaccc9f..8d4411b76dcf5fe58aebfdf11357311d4a43c73c 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec85a57da073f302bb0ff01e4fae51e50f9ffb0bd8b67ffe564fa0c148935ff7 -size 713782 +oid sha256:5cf40ea7654d941a28eb0267166b73036811d2b76e76de1b7dac3027a1140dab +size 585908 diff --git 
a/images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png index 15033ae7b7654ff256673e058aea301fd54b0a69..2ec6507610ee3b2b9ec796f91e5e9a7e4584798a 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:314b8a25566212cfd39578b26b346f0b929dac11ac4faa7f214b1e347b9eb3ab -size 695946 +oid sha256:c5ae7373ac1481380932f85cdced7800c04f13f2a64646dbd6246e02cd11a3f7 +size 721709 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png index ea42cd3de9d5cb6e1805b353276050d218b00aeb..26bf35a72f0f2bd27740ca0b29f16772cd7998bf 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2012bf3603b0a8d8d39e91d9ae3d4f9852ec61aec8c34a55485d26df0116df4a -size 751158 +oid sha256:ccb0b8d42068c47c95a988476e48f5f3bb53d80b14b83bf8611ce54decccebb0 +size 764144 diff --git a/images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png b/images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png index 8b31e85a5802b6ad05838cdc2ffb31c6eaabe255..245e8874c13e79c024d5b535117a4cb863cd01c8 100644 --- a/images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png +++ b/images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7da6c36cee44bda15fd238d98694b950f0aab70ef7c4eae79f636a8ad29d6647 -size 2223582 +oid sha256:a0f2102301b2597be5b5f7e2625ded5f855663c314f9c4188a6ed6d2ad89e455 +size 1693833 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png index 2cf43713749b7406a9eb0c2da9e9e798da9cbb53..0e7ce726f4b2807b1fb91854cb770618609157d5 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8d1a402a484aae8284b7efbe20c80c981d945ca7f1ed997dc97049506163a12 -size 824407 +oid sha256:4235fe3784cf4448c6a3062b9f21de91acf97cd760cd7560519742677befccc4 +size 835523 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png index ec0f7a533093cabb6013a8dd3133d9cf022cc144..0d41c69193df9ab5b72fb761f0485b6458cd3bf3 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8e46cc5d27ccde238495e9059d1d827a19032e91d226f7347a88b351547f242 -size 827515 +oid sha256:dd4c7527268dc72ee79fa60b740bdd2ace1f17123c4ef1326891ff4c5b0253f4 +size 1110975 diff --git 
a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png index 5b023aee712c8f9359db6009aa95f85f8276c191..8f1a1dd68d1ba765e89df439d4d0c2d560bb0c4e 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:480d08c76016e40a229da6fd69afad59f4712d76e5eb9e377b41f0aa84650281 -size 946052 +oid sha256:18cb62a23a888db09d89fd4735fa32c77dc2050ed784a5f679c9d3ee696ca125 +size 1761989 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png index c95e4d4900d32d8f0cfce9cdcff0c80062641985..a74d51db4b6341c5458b19a9e54aa010b93f4fcd 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ad8a76cfbc8e04efe49159b3897f2cd1a6857ede07c6bfe029b59d9d4d22f22 -size 948288 +oid sha256:c7263cbadef767d21ed2230afed4eeea10aecf0aef793bc962e2f88233e05a4e +size 1720360 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png index afff077941e6a37e7e74d032033020e8b40bb40a..77fc693db04466927b2353fced14225f662173f2 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:201fe546bd74a8150ad54c3ddc6b138cf74b05b634cb00e294c695bb8467ce88 -size 943285 +oid sha256:7700ba0acb3edaff289948dcfaf663510e360bb2b81778ac23d384271a7da554 +size 1473089 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png index 095ec2648705cfff8f587694a080a69990830516..0c8ad2e8b84b0379bd04dc233dd740ac23de00f0 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7554b13451b9a42594778c881b33a673aea89bb34651e493c8a20fe4c1fb5f3b -size 949223 +oid sha256:059cfaaf7aaa268b71c056bc1affb9e1bafd3fb14d2080f849168c225ad665b0 +size 1529712 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png index 65e40575ebe4e7d5a8801bc8dc1d451185b126ce..f664d19ece997caabd6c5f23d3973a3122c686c6 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d8190eb56247a3e333d2fdd049ebcc9bd18e56bc05df5d0b8e5e3479922e943 -size 803322 +oid sha256:e894832c3bb823970ac0807c329248e90280e81163fedc02b9ddd051a3c96ddf +size 1240890 diff --git 
a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png index 8d31a4bd7e2caa8eec81fc7634075debc872f15b..7df4928aeebdf81f05582ad0715805979e755a10 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95ad165375d8ae6bb12a603c04a2d5a92da7124e1f0287be2d0cafc28540273a -size 806820 +oid sha256:f306d0aaf62137167c44aa2f935352567437dc2bf6e135e66e9f078593830cc5 +size 1558099 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png index 070088cacadfa3946e740e2e094e46d1a0f74822..05bf42d3d7707bd873d156cb4157d878e89372f3 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2cbabf1c1fe90557db2005fe1bb8be8eecd059a6b637c20b4ad81a153872b42d -size 774730 +oid sha256:74fcb853fafa51fafa1d67a33423b9deeaaa18b6734ef5e2d5783e0483b472bd +size 1592333 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png index a5939efcce9d42c9c797b2800f061c360a0529ab..b5316d6c7dbe50eda24d82cc785ae085bb0fdf54 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96905cc8997aac26644a8ee35bd881cba4173ffd6c356a73bc2af5217daa1771 -size 959331 +oid sha256:5be529e6d676dd5ff4d041b2baa1cd6754b4971605a2b7c4f98905a0c99f00c8 +size 1507554 diff --git a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png index 0a9b0c310abf8a036a090317db49e1305a76d947..571b6ab5dd89112310bb030111efe9eb77dd256d 100644 --- a/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png +++ b/images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9348dbc14501dd32b1e7d8cc0de7f6881f7a852e0b390052be4c2438ca899228 -size 943910 +oid sha256:6a3906d70e4df1c10b1c709b26ef037cb45204cf49e02c2389e6605879e5ac17 +size 1687178 diff --git a/images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png b/images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png index a5037e2a63bb87670cb8bbc38f71c1df6bf6710c..2db1ddacdd17f5a240755c92ac6e37047f39cfc9 100644 --- a/images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png +++ b/images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27d6ae69647bfffb1964b79fa6eb0e2677d91adc061ecf326e86a6c51e5ed9cb -size 167427 +oid sha256:f1f06d8b32a86dd9590132956ee4990cd0cf81bce435f422bb17ef190527cc5e +size 217553 diff --git 
a/images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png b/images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png index 6aea39fa31dfa66ac6c21e567c388ef91f2a6a4a..dab19c3f06573fef61d130126b9f1122150e27a3 100644 --- a/images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png +++ b/images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:694615ecd1ad7bb8acac036fe6c61005debd394ee95bffa4550b7c5bebfc703c -size 2447207 +oid sha256:1fc3b0df7186649e976361cef6f2192b23d00d58b1fcb3aa0a439cb01bc174b5 +size 970912 diff --git a/images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png b/images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png index a208388302258e562d44eb67d84ff96b507284fe..de266e1a4d689a78e54b3065f24efa0854209f65 100644 --- a/images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png +++ b/images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56eac4f81155894ed4f6319f064d5166bf99ab80b432877ac1192025cdde5068 -size 190201 +oid sha256:1b7f27192ecce33d65d2f0094fd49c747b224acf2eb8eb1e3795e6a379572034 +size 240351 diff --git a/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png b/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png index 493d6d4f5b5dc810784644b39d0d4858615aede5..f4a6a683f28fb71bf246c2d464e4a8a608219cfc 100644 --- a/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png +++ b/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:224f14242e76e8eae120f414a81f05eac8f9406350c087b32102a0d1778fa7a2 -size 1008499 +oid sha256:9c78b8d772c98a9e379fab9f925b7db845bdf2cbf9c7137425b602e453b00500 +size 1597522 diff --git a/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png b/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png index dee019bb9c6d5c17f63e9543bc33863283c7d9e5..1a04bdf63db640c85acd1b452142dc463043c812 100644 --- a/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png +++ b/images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:132a1986b95ad17ca06e08fc514c7caee4c4fff68ac0c8a7523edfb47a721bde -size 1428038 +oid sha256:2e984b801eb3cf28f7d655021a59a6597c263cb9171d9375d35420fca9bb52b6 +size 1488773 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png index 66461a1cf45ba500a2b9a14a5bb29aa3c6c2ae82..902253e7a4c649fc9f46849fb44c802d1f3a4530 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:024d7a53f8bae8da8a16a742b0fc4c498e381f702f84a863415991d0957a03e5 -size 1524003 +oid sha256:733acfb15677bc92c6b64800bd98bec23cc63858d21a0056717c5a8cea02df05 +size 2209448 diff --git 
a/images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png index d3afb5bddef05d9c10d465cf770baff1fd5a0751..123f24aaf73ef086d5d5a2f4eca5812ed378f71b 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:867fd1d313907eda491aaa2965f211bb1cb466b950a606e78fdfecffaa93ed12 -size 1506952 +oid sha256:0087b1bd4119fadf491d5850f191844b7b8693ee9148319d8fa33a16abe1c079 +size 889211 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png index 7f5221d48fa867833d945899fd9db829dd2d4b64..693b74f7a2da3b83dde60cbb2c091144e7d84417 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b14bedb972b96bc884edc41cff49e349362bd9150cc0a5d75f8f2bf62b1a3b6 -size 1553167 +oid sha256:770d1ca006e7867204f3815fb958f568d4fb8cdf9b8e6e0ba404b59010d96198 +size 1382477 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png index b941f7ee61b8b48c66ff6b33c3b7a2f657a0af5f..04e78447d251b14230e184a49e70d92b1d6fade3 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:354d149ab797ed16483ffa778b50ce6346e87c16c54c41201792edc7d9908ad5 -size 1388892 +oid sha256:45633be8b004f8c9bec37da35ef1536cc155571f62b07b66c6436ebc4717c172 +size 1578611 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png index 9963c124eaa7f8640955fe605c509057b7e305d3..a171dd2330a93906ef7f5ca32f7aacdb61b1c828 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e210edd27b3aba03f96e12839316597585e52ffbaf92628ae9a7f54eeb23771 -size 1486531 +oid sha256:4d0c2f68de049a5a933442222616a988c5e95d70c57a3c568e2b3ae7c85a7d3b +size 1687282 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png index f4cb72d9c2601c41b881449c8f3265e57191a60a..6ff4797523205e5250b6be7cf9e34c7e5529b11f 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:020d600ec4d2bd1c7008d4153e100451cdc23a1078b8263effe1663401bd6f79 -size 1491132 +oid sha256:2d9e862250f992a8bf163fdd8c9810cd0a151917f8986aae6e099d88fbea2d83 +size 623913 diff --git 
a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png index 9dd5bf6b030417d0a5d87b16755c4205b0032ba2..18fdfcb8bdf5f0b0580c8f7f10dab3348ea1b051 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4533b07cc8d869beee4a2964ceb0c06d4ef66b24ee158579a62604f3edd37940 -size 1483491 +oid sha256:dc1b76305dc61c33c8460afad8743072f359eb8d85d2d1806f2838c511245c1e +size 1843811 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png index dd2fef50865c2e27934bbf44e803a2ba6f0e3326..a55cf16b30604b7b21540dbc7d9fe04e6b18c5bf 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01d5b18a6649f47da8e72e1d857d724acbe8091300271e9cd860010d24eb5f51 -size 1487759 +oid sha256:c6bdbf977b2f8939272b7ccfb504b36e2249f5351f05398c063178d4347fa116 +size 1281705 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png index 90ccc0955c57ff81d5d476de5fd382cec1beeee2..67b9a2c34231421cf7d02ad91a450528f4760048 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1409c787bd6a4373a3473bb38e14f037d14b17eca7e4d4d858d6d9f495681ee -size 1547009 +oid sha256:edafe8abf8bbc627ea4579ee72c3bf815a17353c1fa5c0a2076afecd7c89ddf0 +size 944652 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png index 53488b793dd8f84bcd28a2deb6281207a2607020..b139ca5dd18ff2f3b337dc7b128d3ce6088d7024 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81c4893f1986338d1c143c98bed67a524857f9c5e5983957723114c30c941d35 -size 753646 +oid sha256:eae2a5c6f6572f98a1825d17f2f0dffdb2747778b3629991e9dc6b6ef60d8666 +size 860838 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png index 8c3b597d4326f4f6a62958f803ffccbb60414f74..76bdb9647ef4ef0d8d989aa8c1ea6ad9df958c85 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a680d6403ee05033386c0c0773e71429df988d1ab69637fdeb8ab9ec08154f53 -size 735259 +oid sha256:3957d8b0d2e1f125b9272cb823a71c18ccc806f4004fb71e96a996f37f1b3eb1 +size 759790 diff --git 
a/images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png index 7e44fc124a622017604c9ec758f970d87db1244a..be96d508d9cec63fd286ef21bac165df5f78b545 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f79a43b980afbfc5e440ac91028030ba07db663cbdc6a7ecb92a5dea6884f582 -size 362671 +oid sha256:f199624d20df52eaf5fd1ec566e5c810935614509063e1973120e5a56e237d65 +size 553150 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png index 8880dd1a688314072b18b0290c70569b39c7e8eb..7602d49e47b8dfac16ccd9b2f83b2f7700111b93 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a2f4986384c10b0a5e60dad1162e0f60bdbeb0251f4a54a499280de5c8babf8 -size 1515009 +oid sha256:c005b6f5b4185c1d2548468e21a1b9fa2931fe7f5bafba3ecea8ab4250e2dd80 +size 1518546 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png index 9afc8a112b92fb1d33614ade313b093aac77fc70..c0a2fbf8e9c95ccecb5bd2ba3679f5870f63df7c 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4eb3c6aa8d94dc9f8a35b843ebe23bc730ac8481ff9e1e4155d36624d26a8580 -size 1525085 +oid sha256:5d61e24394f2515cbc245f5875c0c88d269fa56574645573332efed047b67552 +size 1284950 diff --git a/images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png b/images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png index f821a1be2f65b9ea9e69ca93fccb1ac6e7f0abe6..5f389d019437589dcf4a51dbbc0417d6411d8388 100644 --- a/images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png +++ b/images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1726f914cdb1aa79981cc83d68418567726e390fa71aae9f0b43e76f1e84fd55 -size 1520532 +oid sha256:2842b6fd026a78bc6ca79f788dbce6b8d17d1baad5ad177a8b93e9f669210767 +size 1205273 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png index 8632acbcf01c1a3f4d6869e332897cc9c810f1e4..d9209ed1f4274c1357610122967646289826fbfe 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86dac41d646e6a48a45e7abed59ad606a99d105a77606d7e83e868155b5c6b5b -size 1556407 +oid sha256:71dce0498e2b9f15cb990f28f2309d368651c8b5477d508b9af0479725391191 +size 2399076 diff --git 
a/images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png index e7b988a2fa86d16991e6a38961fc38622126f0ea..2dab61a3f32c3fb0405b1ac4fc1a04abd36e5ed5 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45a5bcca996617ee19c15d7fe7371c4122c4429fc5d5a200b939a267383f242d -size 648404 +oid sha256:98208a8f1360d6d00e50811a59575653929ea204880fd2243f8361e7f37a1c5d +size 504025 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png index 50e251305213b0ab43d0312c6f124bd9cdee51d7..f953e432cbab0c74b1fdb26b312717139bb27c98 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:055f41efce57cce8256a425e8324219898b050a994d223ac7ff63eca8f7c5686 -size 1126751 +oid sha256:36263adc471e99122c05a043c283733c458e6c50ec751fe4cf5359e25c14734a +size 1679047 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png index 635f3b5a77e5174322f375a50263f69834179276..c573c1c9bcb0de1781a8b76bc245d264b23d4a46 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41edaab9aadcf837eadeee2b0309e8d4a935fde1817c5978364cbf712473cbd4 -size 1677211 +oid sha256:4e0907e856612f3a30f39edc662f979bff828629b0130d465b35568abbb26a96 +size 1773413 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png index f29778ab4a6736f5bb49e614f4e253467b4aef5c..eb8a24b1f6f044c7df2334da6e2cbb6169abfbb3 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:543a6c82c005221d36041ad318722e69fa916a70ceedba12c65d00f34c598a7c -size 1245219 +oid sha256:942ecf8a90bbd0d7aa57c12b279cc773a4fd336f86918af731177a73c5a90c29 +size 1321902 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png index b9899dc12456e4c891ec3febac33a7ed59d7a997..06741b6fa7520da816a64296440f3acc26cbff71 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dacb87483b6d2f616fafef1251a3a627e2cab455a75c67c11bb01727271e5eda -size 595189 +oid sha256:139a581619172cd00fe103112bd02c2268844dc4c8e4292d8ebc2ab73e5b27b5 +size 743641 diff --git 
a/images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png index c5a3d53cec0226f56f426aac8762e065128ed670..44b3f9ce8cd624de4b47550152788a89de4f708b 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6360afa5c9d4702444fdc8373410b7e3ec1fa1011c0b22298a4b8851c17433c -size 714664 +oid sha256:ae8815bbac83f822d83a7b2d2b5e55a6c1306c78f1e4e9ee2893a10be44751a1 +size 861291 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png index 55233809bfbfdae5b885f9944efd10f968143e06..e34720d6f1e713175814ab41bad51572044aeaba 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67c54ce81540b087af2e356a1eb4ed95099bcb77e976ec9d2f619e768c3c6e70 -size 1565249 +oid sha256:a35e30099d89c32e57dd6919785428ba24ba3e560436b04d1f2a7b3448b556cc +size 1295745 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png index e38d9ce29ad9908eee9e094c077d56568a7e592d..c0d5b17320ca29c6158bdaf1197125d326fe50de 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51f45902d631886631f98119eaadc848b9f043f8473da4456cd755ca5ed82758 -size 659874 +oid sha256:92b11e3fde0c8d26d758c0621eebadbb84ecf7d561104bc836ca072466fc16d6 +size 773818 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png index 99be847c8f254a20b2c93cde096916a2e280e64c..f74661e5d29c6dc17dc41b61175258a2b54912cd 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac9baf438488d28b704cac9a9279d0f890f3ab0239b43573ad08b0d5e477ea6e -size 1247035 +oid sha256:9178e35a941c30850806dfc3a3c26e1a987a6850a885113304747169450b99d4 +size 1678900 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png index 3488d59493cd350b1e4ae4f49465ed3296460b51..2351beaf5cb5ea146319334b8a44c1d91652480b 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9b7c10267c9c2846dc61362fd29cdb0c8cee00b7c56b7346edb80fb66af4e1f -size 606122 +oid sha256:aad870e53786d752b56eaba95ac36b7ab832eaff66f58ef48b3b35bf2959e0ce +size 709482 diff --git 
a/images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png index cbeafb0a6fd459d43d098aa1942a3303f60b4763..32503e46a5f3bf742d3e6dd3471f9f0f7da084e0 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af03b82fd49016c51ae5d084056aedc3fc94c1e6b318c22d37ad0aca965cf4e0 -size 1118350 +oid sha256:3f8b746084dc1b735d963f2ade5935b7dd6b8440292478b7cfe5573c0cc7287a +size 1759319 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png index 588ed0df69e4bff32a593d1d0b3c9363e6ef7b5c..db35d236b5d2ef6d49de7719ed4a3e5dc9e5f5d4 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52acc2db77a4ee17830f8772f0f4bf43ded1a8b9617f3db9536cda0e9cf827a9 -size 590322 +oid sha256:069b8f601c23f4751e9f613f9c73900d2126ae259de14a4a447696e28d371680 +size 858428 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png index a2b5d930784bb1c4fa57b01f4b24a2b340eddce6..d4f550070e35e75386649ce31eb2d89241f0beaf 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb591f7be7a38dee74fec211a685b076c747aae89f895fc75cc50d076e1c17d7 -size 1300218 +oid sha256:7710af1049d160879f9d42e52eb5fc990527041351004234b1081db6ec233acf +size 2103149 diff --git a/images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png b/images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png index eb8c126c32e7eb57a3f19ff50b4dd08fcb1878de..9d1d639019b1721a8eb7f39194bbcc451511b66b 100644 --- a/images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png +++ b/images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f2e1f9aa8d873c53c4cc0c03219cba4277eca7fe0e3d113ce441a06c76d1ee8 -size 1595247 +oid sha256:8bce8ff988359a3788d67cbc4fbb2da16a3e9bf30f9036133ef9ad1c34615ab2 +size 1593461 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png index ce280dedcd114d5c59c89fef7dbf5d2759691470..94f14e33af86c7a21ed841737bc2f85c9f88b7ec 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ff05ada0991382cba0eb007870cb008165cc936f3ed7310a5fd60854ecb5680 -size 792347 +oid sha256:e2c5269489f751d314a7a697594d6d482fb6cfc3e4f264aa6942da53edbafec7 +size 572580 diff --git 
a/images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png index 9e3aaaa9040a31cf734a930ea3fdb0c799aea5d5..e4d0e508262ed1cc256f802803e74eb853223853 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43cc96bfeef799f2dc349390910f3dee00335ed989c68c9b87d919b17e532922 -size 461939 +oid sha256:d2fd2e05a732ad1aa610cf9c35b8a640d40ad729f74a15c2a3b56ee44343749c +size 385035 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png index dee4f55be5d3ab4c527be32bb717c4f7e5f81118..39f5beedae9ded47f1a8589b1bd5cac63c21ccb3 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbe255c7090395ab251b421c1c154db36c446357c70baf9dbf3ca3d801bd8a89 -size 669241 +oid sha256:9007fe3128ed2647b207ad626db0f7ad8934ac853b7e692dabe966e4174b727c +size 392417 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png index 9e3aaaa9040a31cf734a930ea3fdb0c799aea5d5..a7700a655b04a56c435f022ed582bf31c54e8a54 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43cc96bfeef799f2dc349390910f3dee00335ed989c68c9b87d919b17e532922 -size 461939 +oid sha256:5dda497b21626dc4a9d92f9da3fcbd1f2318e87dc4135ef2815e46d7640545f4 +size 1229720 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png index 9e3aaaa9040a31cf734a930ea3fdb0c799aea5d5..7b05e5b544841847490a35ab0c2a0682c0574524 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43cc96bfeef799f2dc349390910f3dee00335ed989c68c9b87d919b17e532922 -size 461939 +oid sha256:910fdf95163c8ee5e456049f855eb19cb75c78681fbe13c44f2afbea1de404bd +size 437029 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png index 2a1fd9b72c38f17544de59109537e034b8280748..ae9653050b807cf020c1fe29d448a13a2d41135b 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7382c4b61079841764e3e989f35edb985db714aa25c1253483af4ab2cc09e28 -size 677044 +oid sha256:60f23f575f0efb7e3a2479144a0c1727503ac94c9eccf57828a7700025388ade +size 375376 diff --git 
a/images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png index b861b4ca966feb1e63cb835861a9f9e7a39e7d51..1ef92f22467d08f8dee5c4c8485f80d850d28be4 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e48b0b27246ae0fb93cd1c1503f146b2ea5875b17e35a30790dfff4ca826470e -size 792280 +oid sha256:454369f019d7ad87dbe9ba1fcb89294301ed2c05fdee1610c45ac61472e59b26 +size 932203 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png index e60d6a43916e7ee0afe6dcc77193ac4aeb4c20f6..b2a14aa706bc6fea9f92ac3385301d632411924e 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8626c51384da24927128cb809fe58e04deeb6524d7d35577f6f270bab32fe6c2 -size 792060 +oid sha256:46b8c561ad778c3ad43442a269134bce3a05bb303db345e3f83cdf8ea74dcb30 +size 907230 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png index 3affdda6b3a6a86c4e55230ff70086068b8341d5..1f623394f54e15704ff50d3e26313eaa6a111f10 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ae1543332690dd9f4f2d7f5cdae0f0ba700ec7d6fc26cfad66bdd6b2f04a7cd -size 839202 +oid sha256:e7ecd6ddba7a81b0fd973ca01f0c3c46e3e0431c540c50a075e0e2deb9a76e5f +size 770340 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png index 947af0596aaccd99bf490dc01f0df4d9bbb92027..98e806eb0a9b2c49a0db2082e19fa83f0a9b6a4a 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:626675ea8490dbd94cf3b7715ab4c7d1fe726e3575e46b810859483b1c58ec05 -size 661429 +oid sha256:3afd4acc2a420d7e58d6870c5432827473246409a8e1a332e9ab2378c8027a26 +size 709320 diff --git a/images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png b/images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png index 22b38ce2b2fb5085dc07a618912a2d4687c4c5af..5e4bf4ddbf38599d5adf57aec02eba467cf1a55f 100644 --- a/images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png +++ b/images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f65c161d1b5a4cd3cc145068ac2fb59a8c2a55ed18106c3e0c912f7921e1196 -size 567936 +oid sha256:e6b9c9ce4e7b391c8249c045c1252ec5b73011f8c45e6dd070109008bed08883 +size 716821 diff --git 
a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png index 671c8262b475e8f87f530c10e59c7c9efb27618e..77975e9087ed64f6e73e9641cd9c72991b7192ce 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1256282bb2218d196ec841cf3c1d38d867577cb47e02f6494b2f61ab8cedebda -size 1217023 +oid sha256:66da46f84fa6306c26fd6d20c5193b732b83f5dbd4c2ac3a9496891d1157de96 +size 1507538 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png index 1accdebe06e4f2631afa668a4ddda0aa8789b3d2..fc4374dc5029a56f4018621493fa485bd17fa84a 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d160f20e7b4098dcba5bc49b0f8d801544ce4b6f066fe5521aab4078c23d9ef9 -size 1479724 +oid sha256:e0dfab89f85605cd06ef262c9f8024a27dfee456cd3ff1cf369005f714c08356 +size 1047304 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png index a28865d73b550e5ccc1c1cb4af1fd5590a5ba841..b64fa7f3ebde53f3eae6fa243bc2719f4596ffdd 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72c075b7af43909a0213d07817e32fd1764c54d74e6c204af8754630381abece -size 1117178 +oid sha256:64ed731fb78e13ca276408d36aacd602e8d7a2cba094fa27cd563ebe8da7ca46 +size 1705765 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png index 7966d43d0e82a7958f902529c5306a0b4d0e47e2..a71aa88554ce9b44c238b1f0ac9b6e3c587d3e04 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eee3235af2751c99ea87bd18916adbf7733c2c62e850a2bd41949c2525183e96 -size 1136473 +oid sha256:20f56e876135a3b2e4aa8973ed744b6697970bf3efec73379a524cabc5b18380 +size 2140015 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png index 368ad7b9c069dfde4c347d574bc65a4d98b03e8b..2eae41979ecde498f5c96c9c32f0064cf90da175 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0e7fb2fe33b987c55cd33d390753ddaa6a8dec12189e65fc95ad29f8c39e57f -size 1218474 +oid sha256:856ba550370f2a2558adec8ed314ae866762ba0ffb5a6f22d6b5aab5fbfd5aff +size 1482229 diff --git 
a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png index 0a6f14ad5518c73cf5a7c2c0e5d64c92e71fa242..6505afca0a0bc8d26c07d584afcbdb3c125d5620 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a509c501d98b92ebc41f139b716325e25c96772be7be47ddd4d2d5b13156b65 -size 1116211 +oid sha256:6db15dbb0c045e175566a91ce48925d7a425b83857e69fa485dbaae75cb5afc8 +size 1000777 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png index 2a5aa30e092b1065e92dea82d2941f7da693a294..fcbbf080e6668e7a85ca358504635a9a0f416a9a 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f59d1a2ead9ecb7802ae6d6e36f1df17f920572fdc8f0e47ea12e79f01677a7c -size 1155384 +oid sha256:21f7d705145b63080f2b46af1fee2f722d6fea94392b0397b37066877ecbad84 +size 193884 diff --git a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png index e4603a33a0422b55041e8872bcc11883fd8eeff4..3a1747cf30c1717638a0cb91935bc51d8f85262c 100644 --- a/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png +++ b/images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2281edbad05ab21977016ae71054c5e6e81d3dfd51286e476c44aa44753a23d2 -size 1199599 +oid sha256:9aa54c9e9a46694eb8894d84b2519650a5d4f0f39d7897e7442515691c4a8f5d +size 857316 diff --git a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png index 0a7f15dd3e74e33aab5312c74cc1c67e813b273a..bfd2381fc85cefd99e25ab497ba82300569feba8 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a529820aff489d3c673558d33836b02453029501945e7b9fef91ab87bc3de5df -size 1630715 +oid sha256:41c9c627f2f78ff4df4b004c128d0e87f0767136aa781eb618ba0fb050bdbb0b +size 1524476 diff --git a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png index eea6d128c3b53468cc49c302640763a85d80e5f7..07aef5ad64d5bed9770e9353d30c555f948bdf83 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70b1821900c8e68b9a4c53c546dd2f716a658b005b6c23c6528f1ec0a5e37d9e -size 824355 +oid sha256:a3177a1a30ba083c44a43ae203b7655fd92bed07f34d1d72e1259c78ccfd2b9b +size 1177306 diff --git 
a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png index 818f3944289e86f686f50fa1338304e301b069e2..54f04aaf5435ee30b5e3043633de772be8d96906 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02e2d5b9d40fef2b0b587660d138296cd39e547a4f9f03ceffc2f4523ff51f52 -size 657513 +oid sha256:9a00ace481ad3657041aeec5972f6b6bd6a85980686166c5ace8c5b31cc1a4e2 +size 816840 diff --git a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png index 08d12652248457e0818b7e25fc88005da9269a99..e8978b7290d83c3e67e1450868ba5fc6e1f26236 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccc88409c3b5d4b673d8da818d92dc66cc60f4aa367e306c91bc2d9e3f88df19 -size 214573 +oid sha256:d56136603d759076f350f5b1f41a6dc4bff6df3fec0463456fedc00c99438052 +size 235956 diff --git a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png index 4469345e39205ed64cae0692549382d3023a71a6..59f85c1112a27462b17f7b84b52d106ef81f8712 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7acd37443dd647e495a69487e1eb6870cacc404d76146fe8530746dd477bd7ef -size 972952 +oid sha256:e77ddccd386214b46a30e5404535c4094d07793b2af4eeeff29ea28a3e46426a +size 1299136 diff --git a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png index f997bcab4453738b48b60e4a375b86e8f4679f9b..2c063ee4791d8babea89e42c11bb996a3f963db4 100644 --- a/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png +++ b/images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5a01b8c3d115a67682f41bda182fe96ad223a7e1db89a119bd2a886070f6dde -size 179442 +oid sha256:4d298db3470336393971557803fd74efcc109ef336aa7aa4f06c6a5259778b0c +size 441548 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png b/images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png index 0e1ec57f881c57cd4b92f26e5486f5b0d42df9e2..81c20e04a22816bcadeb687d3f7be5eadb7db01a 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aeb378aa5b447d638c19516eabdc50ea0042cc8e34c6864da20eb7c82e105edb -size 1346605 +oid sha256:e49f2050b844dd793697b43c38edf04361bc5fca36bf861ad9bed7d143cd3fdf +size 1423805 diff --git 
a/images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png b/images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png index f7fa651ad5cf9f246134998e283bc7fe2c3fbb03..2d64b71ad735f8976db6c42a1a975598030d2154 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f8c300e2616834b0e9a026fd723b36cfb6558d3e6b42cebc2d7cae47aa734a1 -size 1427834 +oid sha256:1a3dbcabbf3020b44670668083f0645fce74b112135b5e7b9da16454a85716b3 +size 1479158 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png b/images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png index e0bacb33317b01c156d0a1019b349936f65c8eca..bf43e0cb5a380685f4d133d267edb0a482c25687 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7b6d6c798e65e25d91016f00f94a3a3925a2f63226ce155b168f3dda466a34a -size 1419067 +oid sha256:4ff255708e94639e2b35bb65917a853f915e7cecb460a6de9fed1f08519eddc6 +size 1412723 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png b/images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png index 4bdbd3dc895fc0227345c66bf4868ae9cf27d870..02f785c294f4b828a5324975c09c0badb693f8f7 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:352155b101f38ed77ff23e69268f48518e83b23528247b4ff7bc1c79578ce282 -size 1430211 +oid sha256:28e5194aab03a7d67aa39805036ca1085886e153e34faf90c9fdb8f9a1eb25b6 +size 1478509 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png b/images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png index 84efa635cc100052b136186d56e0c398295a0038..6ac9409bfd29bc7ca88c66f1ac088b5de82e7e5b 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8db9ee1e88e387a49d313201c246732090fbfa3e42ae973d81a4a86a01674a8e -size 1432624 +oid sha256:b9eb283829348f4babb3f5293bb9c1f7c3f03df4d5050b6b8686e9ed0d65d2cd +size 1480387 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png b/images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png index cde35490b2e830cb8cfbc29d2ae84225fcd3b419..fd4ab0f765ad846604e6a53f26c98760658369f1 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1bec3b3061c2bb322dd537947f1ab0bad4531090c1a4e9f685218b862544496 -size 1369698 +oid sha256:fab82e831ff74647525c1df039fb2f216ba87951a19e62bd7adb29fafe51a50e +size 1468340 diff --git 
a/images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png b/images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png index 74a9ecb03c428869deacda67e9d1dab9e3a1514a..0fc180245d7045e3a65c24b46cf303a04330c060 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d34701a5d7ac7589e16bd214652efde73ba071cf07b08234ed626257350a886f -size 1409299 +oid sha256:00e78bd2ffd5dd9a9cb6f5a54f787262dd48e6bff5399d23d65dc1ca0362d5a4 +size 1499819 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png b/images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png index c91615b0d7392329f0b1a2abe6528c625f09fe69..4188ae320fbc96fdb66266beea06bde949907bb6 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbf6da1a9ea5049cfe146bafc6deec76d845d4ab917b528df70c3a0486ba9d6a -size 1425368 +oid sha256:532f5e503a00489ed77a6dd3c4d6211e1198edd318803de2a26049dfe2d6f445 +size 1478551 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png b/images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png index 4c8617b7925cf7c632009d32ac99c8d346e9aea7..f62917ff0d9cbf36200c50dd27d839c017c62ad4 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9c533a6f1bdd427aacb259f8918d98e771d765678babb0a26e8b79bce30e96d -size 1425959 +oid sha256:e0d99651908ff3c9bd216c4569f7e4babe3f783f0607b21b94cb8120ce090521 +size 1473296 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png b/images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png index 74a9ecb03c428869deacda67e9d1dab9e3a1514a..9fb7774fde7f5e01b54551453ee18ce1ad4bf3ff 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d34701a5d7ac7589e16bd214652efde73ba071cf07b08234ed626257350a886f -size 1409299 +oid sha256:7dff9a5408814b571c6e2fa276112d9552e7e170e12a780a21fee1a81fcee74c +size 1408571 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png b/images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png index 3355f8937f9fea61f0327fdc613c589e1be1db1f..34ab4686efbd0ddc88237867e342ad96925cc2db 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99f30b8af18f3bd2c77c420cdc53a0726e7cfadbd7975fc6467f23401960a324 -size 1373449 +oid sha256:27e7c528f46f8490d84ac014b0b81eadf7066d09c9a28de124aea40a9635c326 +size 1147992 diff --git 
a/images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png b/images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png index b434b0186b68c4621a5190c57ee2990e78f8575b..f0b81e24d9d56ac93ec6385223598cc6c300af77 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7d3ec481373ee6d17526e7c608418440d21e6d7bd2a5207a25653843de2284b -size 1427028 +oid sha256:5f8cfc182c04ee9ede9ce136454d53cab44ff9d88c53376c979f19cb89188f3a +size 1472999 diff --git a/images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png b/images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png index 991f23ecc388f8598e5274fc2629962ca2db52fe..3884e935d4cb9bf0172b965ddfb6b5151e0d5962 100644 --- a/images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png +++ b/images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c734dbe87bd1a10de15e2a480887fca194714f16ab838737707feeefeb0c2a2 -size 1425706 +oid sha256:6bd8d27f17b6b56b4f286940487be09a8146a2c3668333084ff2a9fc31c458d8 +size 1518612 diff --git a/images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png b/images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png index 5fc541aafe812b60d2aa1ed864a0ad903df98527..8c2fa90493ecf0bfb9ed9b9d588d45500475dc21 100644 --- a/images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png +++ b/images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:052456846d537ce44408f265620f0ae163dab8051398a94d0c9f692d3b7d4021 -size 1522609 +oid sha256:3f4330006f072cddad32708756927a5fd088c484280ca3a6ebcf1115382812bd +size 2333025 diff --git a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png index e8d5bd4d7eadbbfe2cc22f7e30ae8a13c803631f..37039660001fe891e10cd167729ecafcf6d3e722 100644 --- a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png +++ b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4640ad2dd3126ed63a9c9b4122ee034684c80a190dacdb4324a5c4562333b920 -size 436420 +oid sha256:8048e0b2ee72565ded7560fb01413f4642fc6c3b7f1a6d2cf9e961b752028c4a +size 887001 diff --git a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png index ddc6f5904fcbde78dec5796409806caf255aa9ae..e0c781cef45a89d22b79d079072ad28a70ca7bb4 100644 --- a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png +++ b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a53628b18eefc0f56d7c3f6847519a25d95122fd97596799e86499e7cb634b1 -size 514246 +oid sha256:77b44150ca140ed8103334a4135a0eb6419e4533e954b36c3b06eac61b14fbbc +size 268950 diff --git 
a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png index cb01253584eda5189eafb4286e8d1773024e3858..4c01851d5f0f934a034fac8b25c5a88d28193db3 100644 --- a/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png +++ b/images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:025a0de74c329fa4efb72c418481627a2e57784bb9dbb14a2daa98a8cd90d92e -size 465092 +oid sha256:7e51bc490d876182b279fbb7e8daf29f8d6834a217410a2f1ee15247e9e2273c +size 286468 diff --git a/images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png b/images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png index 8ce5e6f9947479eae14761ea6c008161fb926b1b..4bd151f382c6638b50598baaf2cf19127932062e 100644 --- a/images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png +++ b/images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce0f96179fc445ac286c9eb3b42725c5e53468818e8d9f218b68c4e2e94f38a4 -size 1194676 +oid sha256:0063682076e335d4290fd71fac860a80ec5ad337a65255f2b6cc94207271816e +size 1061010 diff --git a/images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png b/images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png index f91588c9d9e449558884eb6498d583f9431d9ca2..d22349a018cc4ad742e747d582d46324c81a11dc 100644 --- a/images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png +++ b/images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9161e9f2bc4e077ed60fea85e727d4e0f94de389147e7a896990a7a0dcd06c7b -size 1193431 +oid sha256:9867a22c95edb466e882920b164a46840e156e31213d6514f58d5a84fb45b65e +size 1016423 diff --git a/images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png b/images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png index 8146e4ba959974ce5527c1c866e60ab878af4233..920856ada5f99696b2c37fc55b17a9217e1993bd 100644 --- a/images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png +++ b/images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0eb2a5870053bc599e58345f6d2125c05cacbfb5d703f5b4e97101736cf7cf03 -size 555640 +oid sha256:b5c79789f9927b91a2744c2de5ea3df66755dda93ae59d2437a420ac3262ebd3 +size 426360 diff --git a/images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png b/images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png index 00b82a841a27f4ebb74204b456d8d116f2421548..8bf97caa8e796e3883831b637de7d4ddbd27c89f 100644 --- a/images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png +++ b/images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81a1af5d9bbeb80f11b6441dd3eccc3df1f0a03c03bd9898a514e527083055e8 -size 908464 +oid sha256:5689d22c86baa6659d707b33d3d58f11820678be50fd6435914aeff5c528aaea +size 299161 diff --git 
a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png index 2cdb4da4548590307e41ee25f341a95fd4cfa0cd..09eae3036aa51de2fb21ff0a05540b61bfeaa5bd 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f47f3e95b4935b43f257cd347686584cc6861b067364d69d38fe1575f671b26 -size 963914 +oid sha256:0dde3a643710a9d5536d60c7e65b89d0b30cd247ae1bc3eb4b9debd2394c9500 +size 1096378 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png index a99b08b206a5bbc62c00478f8cad191703019b77..b87deaf63ca3488c7b9ecc48582c541d937197fc 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cb5620122a4a51161bca23d36679bd4800f75293959f50856873206991e7584 -size 689730 +oid sha256:7cd7e7ddfb57dc32ccaafe9ce73e3304c74f84d79deb2b3e9b9c579c896dc400 +size 1055779 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png index 88d7f21f6bc08ebc8b85733ec6f6fdf549f30e80..0d454c2887cf5f6548a0c709730a24a5ff5e47a9 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:024055fb4f2d1d16026bc7a92dd32ef019ee6182652b614ab42671e51b8f32e1 -size 910108 +oid sha256:55250dba879aed7fccc230af20c5e3405dee4459217ce271da821caac3e93b0b +size 1231060 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png index 4a3ec058309c3662535fcc4d894ac46b4bd73d7f..1c67a5dfc389da51f73621b6fe08d6502a7fd5f1 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd9912d86027a1ddf584fdb9fe25f9e46f6ff1baa632a74fdf21f1f5de2da030 -size 977652 +oid sha256:3969e014821d27ce30d1411670b0be028e6f9c4d2b7ac7cd75b32e023777ac6e +size 743212 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png index 7fe3ba16de3a6b88d1c058f2d44f3eb18733c8c9..6f48f12db3287cbcfd497dbb2a27577ad9524dc9 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0126b74eb064b7a9152899fcc1ecf1e50498bf0b8b8b195a49c764e849a5af0f -size 649776 +oid sha256:1dfece4709ed5412c45f029acaca13de00afa376578e86aeafc10beec07f5624 +size 563033 diff --git 
a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png index f1ff0fc67f21fef31820010fb757b22f7a967f47..7162fe23ac1942b031a861106d4d40d6864f13fe 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:855e6a65bdf21a436b240655ad28a33d1f8ed5e087420067b73c77ee3483819f -size 822041 +oid sha256:9f0e7874b39b7a41e73db69886780038ff4dad2ad1f7c19c091d40cd01ea41ef +size 971495 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png index 0df74e2b78eced84e32e5740e5ed6eef7c17ee06..70149898b54fc4eade0320b9afcf5270c272bac9 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:349fcf1cdea394815e50b3a46aa6df089e2298cb8a7ffb3494ccf78919d2972a -size 3485429 +oid sha256:92bcd68c37212043f99eda1900ef2b65906f2920dd493d322d34084c75d4bba1 +size 1868643 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png index b9232ecb740ba0537a2cc587c43ee1b6c1d652ef..55ed8c38ac4117b2e4c5be900b62238121a48a4b 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb98c431dda0378d49ddab2bd98c51874d12006a0180d61273ac765b24628d95 -size 807433 +oid sha256:3223aab64cb26354c11ab22508652d7b4687d914db241f04906ac5181722bc4b +size 1018437 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png index 285fc9fd97ccd6c691d14c89b6c5f2408b07104a..50b715d75bac63874a42cd5cbddadf0812eb33c0 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8a8468472cdb3dac4fa9f7b42f5cc76c99c2b6979de60b7b119b1349812d9c9 -size 794343 +oid sha256:d5b48d75d5895c49b26ec3bda52b956b637076e19d333efba116cd8d2e692ea8 +size 1217921 diff --git a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png index cef5b231761f4fbbf564365442cdcbe822a09d37..3c84190d7c6e17f841a59a587d1b623f8a5a1406 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0012b4cf01724b56f2d989ef8bbd215d2797dca3f75f3a8f6d787353e09979c4 -size 653033 +oid sha256:6fd34f46537a8795ded5d7afd38e47ecf7c5b54d7334c21af963052098013047 +size 919605 diff --git 
a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png index 1a44508a3b1924ee72d7d6cac9b0c32b4cb239ab..7ec5e0d58c1248673f57e72bb4d049dfeea63117 100644 --- a/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png +++ b/images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:934ec5b8bb7d5eb4985ae9667150fa91b7f9bdef738460aaf10b2e862084ce2b -size 857364 +oid sha256:2dbf69fd982c9940f02e745da07c869a39e53f5437d36ffcbbfc1ea39bcc6d9c +size 1015872 diff --git a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png index 61962e29d082f14d98e2513dec64d7259bff70dd..4e20a8b6b087a29bac9724ce7493d18297c912e4 100644 --- a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png +++ b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cf4015588464099eb24cdc4892a3d69f541b5694c688d451a6b90d69be17e05 -size 1772893 +oid sha256:f36b256fce46905aed0edc3ca0da7263c6b0602a572cb2be4d43837dc9a5825a +size 1997671 diff --git a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png index a112332cf4cd449f20eafd53ac386b79de6dc9de..fb6636770971c4a217fbae4ba0f434d09d053b35 100644 --- a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png +++ b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:697d930493f4db818ced3987d83f90da4b51e94eba11c777211695ef53ad314d -size 1738386 +oid sha256:34d9e909738092e3414eebf3e39a47a5defdf5f41d3fedb8e6fe31a4c8752a03 +size 1958127 diff --git a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png index fa24c8a3d23bf304739aed14bfd7bf2b9b26fc0d..5bcafc0c078b6a7152831606e6da90685d40e15f 100644 --- a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png +++ b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b929975aac8b067be979a91dfa81bde599d39aaa52fd3da85fac0d3a0063f1e -size 1058342 +oid sha256:b846559207706d6e123523982f93f5171dd5723128b3ebaa9db3757b247b3083 +size 527137 diff --git a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png index e7a1ab988d3a4dd05fed58fd49833eb2dfa4d8b6..691ef791d8e755e92c00f3d10ed0e6d6338d0e04 100644 --- a/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png +++ b/images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e0b1748223d829b13fda6acd7a66a593e1df1ccc8f751771ca587201e0baca0 -size 1106307 +oid sha256:f664cc025918267838cae900e8f0bc657ba03c3bc6caebedd547eeb347bf84f0 +size 654015 diff --git 
a/images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png b/images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png index 755c1db8a9f0c0a9c3ef098bee680f9fe11727c0..589996490a334b26ce8966591664daa52bd60fa8 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e5bf8ffcb016061448d97848292fddd29dcd2a205757227388e868838aa6c02 -size 457057 +oid sha256:2f6b0202a98b97a6ad69315be0aec5f37fa29e3baaf44198b0a5c64de8fe30be +size 374919 diff --git a/images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png b/images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png index 10e6a4a555931e6d2a814ab2e698f72cc0a45fdd..5d2e84a5417354d987792d58e7fdae982dcfcb39 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:757dca5ccc43f9a2a2e2736479a11faa71f38b9c5c7c7cfb7ab34d285dd91642 -size 814297 +oid sha256:c532461361d49ea6f0089bf006600bffc6ff9da5918479fbb6bb5f9cc168c020 +size 865734 diff --git a/images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png b/images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png index 927cb69ef1535345e3800acb6e6158d27797f103..96608a136654a581b048b55a2da2fb62fa1c38bb 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:486fe133abc80ff4ab438f2453f84843da682e4a0e7c0c0aabc788bfa10edcdd -size 809055 +oid sha256:3adf0491784b2ea79363ac9ae0ebaca7eedba22cff1dc7c4fc3826623fa5a820 +size 747383 diff --git a/images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png b/images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png index 9aa802b215b66081d1a4a0ddbe9e7da1e1d9af1f..c810441b50a1b2f8b0c1137d2f16cfc719880579 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8feb0beb5bb914814df196438a00d0318c7aed01260dd187d1325b8247c2e9b4 -size 789908 +oid sha256:9e2f3f4552ad435f1fcb583fa6ea75651580d8d6452e98c8dc0a5ef3e41e9369 +size 865046 diff --git a/images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png b/images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png index 0603ab2dbfb1e4e74285b878f215546f822c9dd0..8106c6e68e426b7aabbd9e4c7e991788bafa6fd8 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38447c30bcf652c6a92291291a894599246400376619be818c2c1561a89c5c79 -size 804973 +oid sha256:1fa4b37b93660f467e2d3bae858f6a0398b710f8e4e61247e5186ddb9ec23a03 +size 670798 diff --git 
a/images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png b/images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png index 88e1df04b6f33636ee5799be834f4292e83577bb..b6e249c0e73077730c7f5d539224c633d69a392d 100644 --- a/images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png +++ b/images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44cff39227913a3601f553aba7b3f250d8d50eabe0679a6b9cbe0c80b2e1629d -size 796849 +oid sha256:4ffea2f218d5ef80434d7b288a0beeea46774926e3151fbbdda89f6b73f34acd +size 1001493 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png index 6020cf7c8c072f8d3b6ea64a02f95bde59683323..f48e735d8d8135943fad1170564cd966b5ba9571 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:178532fed6feba36609e93e3e0cb4cb4e5f77878dd0d2cf1f28eded3666cd676 -size 761697 +oid sha256:2bf509c6821e2d793458a92f2a3927810c887cead57cc638a4045dc5f16a9260 +size 1162914 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png index 72c9a1697cf61203dab3ec3a0fcd9ddbe9b6bebf..fd06e70d1989599921b7af18df3ee83df089c818 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b013493277fbaaa84472a7f98ed9a129407ce1a67d8cf3179e94192b3fdd6ef0 -size 816544 +oid sha256:2f87054da06706b172f0b4d1a3d68d86b9485f1755deb0289dfdae3475bdb83c +size 992203 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png index 96a35c1be226a94d31b73de84492ce9d05dc3c9b..8af2563e9a06bc13ed02fc2bffca919901c3b6ac 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:133522b3db64cfacdda2def4c1985a714e1f4c9e38f941ea753a9cebcad809dd -size 786766 +oid sha256:9a78c251d72ef404da301b413d506fb99b898f0dddb2c71fadf8b2bb62b96f01 +size 630606 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png index 23111b2f05815941294e600229bf97ea1d3cfefa..582c30ca1011adf8e71a5c515db888c5f692e034 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:167346d8bf0aee9146005dd43f41a6b063ddb7768548d3c41497577b47a045e1 -size 1083058 +oid sha256:3276fce3ec7f897b77f655db00e3ff5a5a674a5d0cae6d8d73e3b6b04326d873 +size 1506814 diff --git 
a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png index e2d80cca4d11856f3a34a524e4cda7fcca824f0a..6e69283fa39f44cc38752d0f659bd92671d498c0 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd19214d03e1fa7c898d99cc8553d2c928606dc147d436f8f4868d0952f2f972 -size 924420 +oid sha256:b401efd87c6e797415f9277e5ccd6fd04c5c642bf08a95a27bb98d00515a1a8c +size 1087230 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png index b1a69723b09cc6274e886ea47d1a3f62f5fa9240..566b9ed3588f3e003cc392c2bd7430d37e8fcedc 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:768d80840f3d4a3ed5ae5bbda6752c16129fe31bcc4cc98036c4aed0a7f18418 -size 1262116 +oid sha256:940602c9d8ca1af9c61ad3d456599899b4c78a203c3d6f0c9dc0af2b35fad54e +size 1802584 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png index 7bd27f495dde33779aa129e65935f3788d32ad99..b73645d1b61587d9ed65432542b8c0d2184a053b 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6ea37333edbe0a71f71aa25b4bc60327a488a9668cbc8aa9e3d2e2bd804ed4f -size 602551 +oid sha256:94c23315bb7637add969345218e5d845c186ab4af4e1db6aa80418aceb42f208 +size 1093267 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png index dc0215225e004fe03b8f137c759c6040a69941fd..7ee2f251537c3a986c040c5895fdae49054aa8d9 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e57b4a51bda8805b5c7d6505e8d55a98110f4a9a914e8215bc821e4b4a0cfe41 -size 897033 +oid sha256:c56510e21b6411b99b35e021dcad43b4e80de02ea74a42797bca723e6d820472 +size 990542 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png index 89db995ebd9c7242c26f6d9bc4582afe65ed3840..f962877b75fa03b2a45768fac757ad8d866f4121 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9682c48add28c439795b74353fc97d967d41d83877e22838382e3f966ff54f59 -size 1592964 +oid sha256:637929c425b7e28619636ba3fdc576a45b7602fda64384bfd3922431e9ea8edc +size 2130571 diff --git 
a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png index 03b4708cb40ce9973400844afc7825c49f42a958..58b498b58bf11948e2f05b6c7ffc36b5594ce1d5 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68ec7b29ca9b8fa11d253e92f685ea7c25781c0c2e5c7629aae5355288517937 -size 1092253 +oid sha256:612fffcd024e711669aa00560a9ad40c43e1ae613b856a3438a4f78203432c46 +size 1067115 diff --git a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png index e1e5a0b36fd035e974e2cfdc809cc6aacc1280f8..06f807648a93692c39eb8a96224e07351851f707 100644 --- a/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png +++ b/images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9946ee0c13a495fa8c14cf6f9d327c2e49b22563434396ebcd9f6d7938f5127a -size 893130 +oid sha256:4b84abac4a466c2b212dbeae6dc6ece3f1ca9c1aa04b6e27b99da89af8546204 +size 1151041 diff --git a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png index 2dd4035a28a3e04db8effb9699bf15cdea70d37f..40436c052b7bc784af0e417006259540eb3372fa 100644 --- a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png +++ b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a285b6633c5d6570dbe7b63e82af39e5b6bc8149591ec2b490ac8050687aa896 -size 1301688 +oid sha256:9a588e1bed4371f839027d31ce152b67ae3ab424e42f49f854423839a8c7658b +size 1463406 diff --git a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png index 6d9b126687587768c4257b2636bd0ba75cb13b8d..ef9f0b280435dda6bde052ea3334ef57b7ac0804 100644 --- a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png +++ b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5f6fd6683aa96abb3a27faf816630d6bea66e506d34d1c147533127900af805 -size 1009482 +oid sha256:804b90f843c21d7e3656c42e689d97a9338da76ed47ad50ca3b1ad39e3b6ec93 +size 1190565 diff --git a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png index 79325efb17edf62516c9f5a9ff6b91193d49af11..949c71adc5e67fec5b3fe08d1ad4877c0bb0b69a 100644 --- a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png +++ b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f260b4e8083ba14e11612d2faaa8cb9238a3f336a8c6dfffe4152b93a8046f0 -size 1128345 +oid sha256:4d188978f1afeefcb10bc7093686608c97b0658de2f705a6df0ae218ecf03226 +size 1184306 diff --git 
a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png index 2751649c03adfc7e981903125c1bbebca62d7f70..87ef43eb844e2ff0be385761ffef4646a0f8c3d8 100644 --- a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png +++ b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4af987ddbacbaac05ebef6363b825c42492dd34bfc7f59f84f201187a8eb1ae -size 1124743 +oid sha256:c9fc7f5d7df01d06315e0b3eed44bd36f67b995d84a3f35eddb6d65bb36350a5 +size 1750632 diff --git a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png index 3cee564ec8a3fd213e99a46b21d7bc349f062eab..5f751d535c0ab2229338601ffcdab093d85840d6 100644 --- a/images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png +++ b/images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d2ffa8d405856c3947d8ef73309140b538bc680ae6ed0ef60a99d0f7dd7b969 -size 1004692 +oid sha256:0ba50789add86ccaded48d67218ac08068cbae2e074ef66c3566f8d3aee7df2c +size 1208922 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png index 943a32e1168dc1798647969ccedbed6b6054795e..52302d1aff10143b84bf7e3057807c889f2cd100 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ae1709ab0fabb2e9200637ac2b5a23871e67949f598885825073fdd2707335a -size 833202 +oid sha256:2006f6a47d6f79017ff8467be3b999fa147bd0261579574a3d6d81c0280f4cb7 +size 872178 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png index c34b3a2d52cf0a69c8ec1a6f0bdce87f0920d294..622b437f31e04f0dc12507e4f6fffe7191b06885 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c7431144a1421b22223c134a86dc1354863bdacf0c431756cf5faaaa7b079e6 -size 1226526 +oid sha256:d2d6d8ee672ece9c186810e64ea4c671032922c29cd537c1cc828828bf2b66e1 +size 772173 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png index bdedfd43cc9469a6250dca97049626f74dbfe02e..1b975aa72ac17e4b4573e2cd779d8aa84411671c 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90cf4bffcccd6feaee622d3e6fbd591bc293ea2f8472243261a25e09ed6d7b72 -size 901037 +oid sha256:111c73d6cdeb750217907a405bb38fad5edacaf41e76512162549115242573e7 +size 1801850 diff --git 
a/images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png index 4170059bf8eda025a3e0cdd1e24791249d2f7565..64d603aa565a258b97e6ab68383f24703df8479a 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c97e3ab3be449968545a5ae559974b50aed97d30d5f424ddaeeb046c3b43d401 -size 2011142 +oid sha256:d8b135d6908b6ae150967a8e80df19441b18bb7edcde5975ed34e5c79e1002bd +size 1783982 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png index d4ac0aeff479021e7e9b221d703efffa24070566..c011874029550b44ca3545e2add5bd9893a16d8f 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e8432dc0d5d69b90048c8acca148fc374d1423e16ec0db6ef1a806ace38bd32 -size 1907890 +oid sha256:fb12f2e4e6f078fcd24e84fd4e72ec28d24515845ba09048e4c8f6f02bdbbde1 +size 1446705 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png index f93e881f05a4721c2b2ff7f0ce7f5c0455b62b22..106071a5a8004594e5614482a624adb3bc79f5ca 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4461dab16f0afd40db2569c76fe21d94a607a2ee3de5db40ff72c611d1d9522 -size 693094 +oid sha256:84f930a8e20f33c766451d56a4b9a657e2c0160d00cbbce3e862ea8cfdc87963 +size 758601 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png index ae0a7240f33d6096b90c0bce73f27bf656a2afff..9cc5b66a975380a2f095011039bc069fa5e995f1 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ade7cc44fb0fb66b0d2b41a2ff7b74b652f3832ee96c57f7a7bfc1fa7e8e9ac8 -size 2203490 +oid sha256:3a6e1d39f30cf8b3a0dd5bc2e1851090adca7f4f06b04f1fffe3d300b36aefa6 +size 1524718 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png index fb2c48245061afc63b347854014f210c7d873594..b351c02bf81565d9d8155e15d6333e2f92fbc632 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b239684c5fcc34e603fc2a878a0f6f17bdacab35a20df5630d34667abd98cfb -size 2319556 +oid sha256:d0be3af63a26e231b0c92b8e8035f91af719c093f6288ba03cc2131d9e031ed6 +size 1640229 diff --git 
a/images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png index 7d25a155a2585cc39ea01d09de7d0d5099eacf22..c0ca83c44fa4edfb2853ea4bc6ec3153be75bd36 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa3a3bcb38e409e4137e1b7b8f1c3431f20c353d528ced4f2c864a430c0d51b0 -size 865532 +oid sha256:2bb0c93f75a0e371f6ed21c98aacbfa065f4cf50f2b918860508e2384bbd30d7 +size 1509417 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png index 853e2e65e1ded479ad2a74f79a367a4120b8fb23..02b01ee0e5632dc16ce8dd0d40c8aad3c9669347 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9df4ee872e94055dcb2d487ddac2ada7c9670dab3d5f5baf88a361bf90e788d2 -size 865628 +oid sha256:58e535f189eec36ac06ac57f2811d82ae591f2add40e336bd6589b8b7f5e4101 +size 464539 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png index a5e0afe323b230ec80104333db3f21184f574dbe..7e38131945827c908fd0bf708a1ce7feb404c8e8 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ecda325efae730f91c641f4a13daba58fedc2d30a9cb1f7c0ba134d24ee156ad -size 2164047 +oid sha256:ecd8f41d53d7c3ca2bb01503a74646b03ae91d11e8c02235607407ba72250b38 +size 1599856 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png index 6b6e9c2e83cadc323451cade00cb7d2dda37b2bd..6817217e1920808e31c1b36317599f770eb936ed 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:282bece48f13dc158f0d7ee8990a9af5711a3f5eadeda9ac23562c790cb6aeda -size 880603 +oid sha256:8ea0d18c7ace8079a37ef69a3a9ec878e1b8e2a4c21ee619b371512ec37b3b50 +size 1398012 diff --git a/images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png index 284293b4d10cbb14666d06a5d29ef1c0422dd158..b6fd667f225a4e6bde8127c77c58a97dceb6f021 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:605d84046e928e750f89c6d3edf9797d80713882d337c5dfe2ab11ca00f1290d -size 2255159 +oid sha256:25c6284862832af1dd2bf54cf75c46d6f15e5a700b811dc833ba94c9b99546c6 +size 1298193 diff --git 
a/images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png b/images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png index def46830c84e0398d5bd44e7da80c0b9bf007c9a..2bbebc2e3ffea6c27322cf6c46143952ffdb816b 100644 --- a/images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png +++ b/images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58a0f0aa55a98f2b4fc6c49ddaf894ec05a06ca66d478afd5fc0051c269a6fd6 -size 868729 +oid sha256:ea9a55f4c1231a205ca1886d54b39e790b5e6ae6f425a49b54755d7f89b9a7fe +size 912160 diff --git a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png index 17066d86c288364de53134147edbe8f8a6d820a5..43ce6de3c3086dd53e20f228daefa52c335066bb 100644 --- a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png +++ b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:567afe16ae277d04a104aa17e832d2bff9ff094a9169d5b6dfc28d6402fa815a -size 1660174 +oid sha256:731da4daef38cb21361ba4fdd689cd55eb9c4fc29081ad1c30a3bb45e0784202 +size 473336 diff --git a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png index 1d2e28bf4b9a42bc505c7013bfbb7c77d25707f5..80a91fc5fd773cc3bcba764c06d0458e5939803a 100644 --- a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png +++ b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7e8128f3cbf3d4873f35edfc7c5ba9d810c05839dcae448248d388bf633b2f8 -size 1602311 +oid sha256:b05b47edc7190db6f58b6fd7eb2e6045220ed2055459d42f9d0ba6018f1d10aa +size 551157 diff --git a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png index eb9734082eeaab1309f4f9e274816bd8d1573715..1667b27a51585b1ef8b5e490134853dfd1ea6325 100644 --- a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png +++ b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86aeef99f3295674007959c605e437587e36ed9e264f3a16dfd475be48cab25d -size 1271954 +oid sha256:f10183567f42fe0c44b1baa84cec7694acbb8dd9afca152afcc2476362dfd0df +size 1576281 diff --git a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png index b296001743001b7fb4bfdb6797783c00708f2e0c..5ba475be8a498054a553c6e483abefcbd5749f01 100644 --- a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png +++ b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:517522628d66814f437e61d127819877a0902a07747bf1f85717af548a4db94f -size 1758748 +oid sha256:54e86722d9d89ed862335f5742982f6644afebd67de09b351e9778751d60fd59 +size 1549625 diff --git 
a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png index c1984cc16d8bdeb12c0227d68522accbfda8af69..b58788f848cbf4a990add10d3a22be20e1f8b8b4 100644 --- a/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png +++ b/images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:300e6a81e849cc842f4357bc3b77ed036e787e117094e3a5826e2f70829adbda -size 1542814 +oid sha256:f151a14310af5436ea7b057014f2b63278141100b25e28982740b76eff63506d +size 1012278 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png index 4359f15681d9039ea83a26aa194a522d9c144947..a44c0abe0d8d2a92222e31413973f45c54b00374 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbc88e253cb10f6579e0310d0eb0acfdd8e19bc24b3b6e0dfae7af9c00780ef5 -size 512424 +oid sha256:78be551612ab06b22fa38642b615eadb02b486078d69a7ec11755868a04f49c8 +size 705587 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png index bb1909503503c40d335976ff9788b352f2ce4c8f..d4044429fdcf2beed184ddfaa68b98f99bb568b5 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce5212e8813b3ba699159fae5f0b71f6c73cb073b618d890b0fbe1b07571ee17 -size 402650 +oid sha256:ea3572020a0bdc2d21ed55f84e04696a8cc3c49cc8b73200657351a59165e4da +size 387775 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png index f07addba4a7d070f6c89bf66fad5e36767f0ae48..2d8873acc38ae07aec9724c35bb659fae02c0d3e 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de6f5444c0c12f0f1f523f9cfac087ed26c240cd91e05283ff39a97040e78ee5 -size 290387 +oid sha256:564b2e801655699e565363ebc6ad107d35517af4770cf4ea7c49280f8992dbe7 +size 777776 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png index 99785c0b44b3cb82dc0bc37aeebb55440e8bbee2..b7c9fdadbf193a041a7c7e67e3573512eeab72ad 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d3907b2f80fedae9766ab901f3f8570fc3e9794caac18ffa8d5d322f5ffc525 -size 585701 +oid sha256:e9babb8a298921f7c169576b6023a6fabf8e0eb02fc1a1f769b780527ba2bd8f +size 467292 diff --git 
a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png index 8440c9cab488fa55850bfed690a3cf9534f1bf36..f4786020f7d860db43a2c0cbf9e80a1e6c5a973c 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c04833ccc813bc8df33be571c1407c23c620abf400bb951a95aeef89664b5086 -size 585665 +oid sha256:d5a555eb6703f12acccc1884bd9f2f024945fe2eaa94443865fd0df078cd9e19 +size 525017 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png index 37e7f96e502faa399a88b26c727c907d87271dd2..fd30e6d4af4ec567e66c0ba880b1fab17ca1bc9f 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55249f1fcc8b815a853ec005b38ba5febfdfa95d6e6acd2318a3b56e728df2f2 -size 293011 +oid sha256:6f0f9a79186ef3cf9c27f307ecb80ae69c3ea7da3f3fee05de8782a3290758ad +size 679324 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png index 48135b1f3f2b29c4d8a7d87aa10bd4d749100cb3..9f225849619702a2ba7135feff92ea17ba80359c 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b9d2b0854e0d0631be47cf33523adadfc5a6398a2b8139133703ec160f4092c -size 564646 +oid sha256:4291b9368f44c79ad1cdf13d418b92785e218be028f51e8aaf317e9fe1bbda48 +size 413572 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png index aee7bef45e33258ae764fe703e06e0467c899668..e6feba4fbd8f340553721c6fdf054ac9c65cb4e9 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebb0df34a7f9aeb9c81ba7124158094ab92c564dece5c55e3ccc2d5a9761316d -size 586220 +oid sha256:66a580d234421002c1bf6e00f960b6d95d6c43b2758a8ab50b0cf1e7db390565 +size 363062 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png index 80f8930ed9812328987c9df53a6c1ebbd9015fdf..38e745b21242cf9624642462340163db477b3b53 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08280ff5445b13bd916b7de40dbfb8c59319473c6a517e67b5c4842ca61ea3db -size 471747 +oid sha256:c4787020d401ba602f29f8e1cd77eba442ed961e87ae4dc6ebd1c8c346281208 +size 689120 diff --git 
a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png index f8d1d471d9f845dd6108273245dfd7e660824049..08132af3d623719651332d42a58b3bb968483e70 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2dad5053e53ef503ec968627d116973815bd5acd7707d2b98c115cf1b1389f8 -size 515929 +oid sha256:e2b055f37ec40090f3ddc90288f4c87ae1119056b054be7e2285373c476a3e5a +size 898271 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png index d915f6f59d848d00b5f5f379a4b9a3f93e5abec9..e88de17a4ca1a5e6051bc6153cd3105f334880a2 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80e14a359bd02909f132f4359ca4c2e948b0f4f9f33f4251ae9c52e92a5a6e95 -size 273032 +oid sha256:ccc5311c04aa3a38971126d7c24ade959b76f603dbe5dac08ec0da36895d1ebf +size 216705 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png index 80f8930ed9812328987c9df53a6c1ebbd9015fdf..9b009cb127859040f7a08c542b840decb47c148a 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08280ff5445b13bd916b7de40dbfb8c59319473c6a517e67b5c4842ca61ea3db -size 471747 +oid sha256:78936ff3eaee61831b80de001ad9d991dca0162a251d0faadcbff234d982c146 +size 494227 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png index d3a7efbf22ee1791290a19fa5312bde91da382c2..deeed1a1e12f566666a7f53faf7855496ce3504c 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:537a8fa2468657b41f672fd74f2a2af21dad2223253e0434b815b3732a9a04f1 -size 462650 +oid sha256:98b37d37341f400b5b3ec85e92846977f9bc5e2ba6f15e25b751c6ad149a87bc +size 574480 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png index 7d185edadedbda17c5a147671ca8ba8064698a44..f32f25a7e9578af0a6f788e4ea599766b20469f1 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27e8a4118796192fb900c491a056c986ff753edef649cc3406efafba56aba7cb -size 480769 +oid sha256:5d120dbd48ccddbb9ad4ac661d3bbaeed014e0750df05cdd2b099d47a3e165d4 +size 438786 diff --git 
a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png index 40f554012299173f3a72c008abca2264ea1be299..7d6e7b5a363dbcec2180b5d13327e4c34745428b 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4577868a9be2a6a08857b92070df720d713e0f69d8a6de0ce7e76ea7377edf55 -size 312361 +oid sha256:8a7babd1c441e886e97e721cfe32b439474a13ddec7b242bb421120f96be6096 +size 480420 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png index 1155b9932295f4402ae0ff3e76e2deb61cbfd5af..aeb726026f23efdaf080dc00c2e05857dac6f43c 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31a18ff2d36f06eb87d3331a9fc99f136aacbe7a4ee90a799bbabeef89c3bc2e -size 362013 +oid sha256:b6b4a27ac8b201e2c8f8c94cfd2793c90f5418cd3c5621aaae3df6fe720c4d41 +size 216206 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png index 3b516bd712867de9b742871c81a787fe5531a91c..964925373b8529a51fcf66e1ddc42acdba2da8b3 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43eb8baad4c00148ee9990743652cfe6d51ce0d7263ba35158bcc1d002d00b01 -size 376849 +oid sha256:fd14a2031ee6c816a60bf121430cfff4be4d873d0817b7586d93f6c67bacfd85 +size 493290 diff --git a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png index 770343971b38320f7590964ac3254b390ca38665..1e9fc6807078bb7c79ef817cc65c42d363016c20 100644 --- a/images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png +++ b/images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:207666bdc171a8ff86e1d00beed0c82127b0540f73fd93dc34ac815d9cd6e454 -size 440398 +oid sha256:39eac221db821c33e7aa3bb003bccdf821cbdb19e4f6ccf5b701f2accf98e735 +size 718113 diff --git a/images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png b/images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png index 2910d4a201b1fea8ea55ba2f9adc1cfb2ec15928..00664d98ecb4b3d89f1c17cee297adb7bb5c9bf2 100644 --- a/images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png +++ b/images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:525302afea64e566dd3c341b1a8d046637c0a5a282581bbc1fab4cb457a2a231 -size 1709051 +oid sha256:2d80c3c3b636189b7b6f107063b1e05c585bab486112e3bb570dab3a11e7ce40 +size 893661 diff --git 
a/images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png b/images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png index 00fbfba406a856afec3a95f021ba176e15a0b7aa..2ad0516d68a4e8d236291ea7230055860b68a018 100644 --- a/images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png +++ b/images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37b1042fc2758697f74a1f434c2ba8970838d6e3a8ede539b0287488bb7477c2 -size 1105650 +oid sha256:79bbda7fad9730d0ada1767f9c8ce2486e09e07198b17c4e335de1c279fb95ce +size 931628 diff --git a/images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png b/images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png index 1a2905c6cc9ce4ba03758f17ca2697c1bdc98964..3c5592d08b62b88e8ec15c07dc6a67c48c8a642d 100644 --- a/images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png +++ b/images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dd44674a0511d871c55e789a08c45e726de5c784262bb3de7920e09bde582fd -size 563913 +oid sha256:f8e52530e49da94c6f7b0fd98567705b7383cbcf74e54e898470bd7dceada702 +size 633304 diff --git a/images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png b/images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png index 0f929a55427adddb2ff54758e7dffd56d80b4f91..2c3f04df04940e0010488b490865b06eef0e5a3c 100644 --- a/images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png +++ b/images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad227bb3e871d569783fae57cb934749ce99d0e46c7fb1e8f1635f7528615df2 -size 740145 +oid sha256:ecb40f94a6185e852a7db49bfd33032c147f2c27332e3a841135c6466b6d8f52 +size 480244 diff --git a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png index 4dbecb4dea04c0e7acd7e51591e42c68887b740a..32c999fae4db2e8e4b22ba0f21a359d1af8e15f7 100644 --- a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png +++ b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eca60ac350d1e0e770ebd289b9a891f009070f727f7b6e9b1700c5b57391c693 -size 1885788 +oid sha256:721f1c6e05a34a942da451649418b5cf32baf679ad3749b25de400cc70c61eda +size 2192822 diff --git a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png index bea55ceeed1a26b5a9aebd3f5aedbde81bc74cb9..cf5da58b1c9ea2287388ccd27a35b52468527e2d 100644 --- a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png +++ b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3dd84d9480876feea7ca8d9dc10bc14ec308ff492db4097694c909275304cf74 -size 1108989 +oid sha256:3bb93c34800c1d59f6e5c4e1c80bfd9ec350bacd230c292e181cfc4c34e3ce04 +size 1181908 diff --git 
a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png index c81cf127ec039fca33e0ddcc199c82f0cad5da7f..42792f842b40dc0b82e501ad5fd3f77af75abedb 100644 --- a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png +++ b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:396c084470a7153b4e5b22928d2d2fb0132d3a9de9bc0e56fc65c62ff27cb132 -size 1616374 +oid sha256:75ccdedddc2663b00be675db363d0c699c35e80ee4569ce6ae0906e700172b52 +size 706431 diff --git a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png index a7a41c0e173b4a5fef91ddc30562fd9f8bbd2624..1e88798a74ca6ca1630fbc91cecf4cda46246165 100644 --- a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png +++ b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77154afa55a603c61512d31cb00c073e366f7a388b546500694f198ba182f1c2 -size 2559205 +oid sha256:ba6734e02588f3014874ba295e6b1356af3618737f511e6b3cc20de1a4a4aeea +size 2838918 diff --git a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png index 01ed3c71826a81f038fcf3c517cf2cdac95aaae6..4e46874851bc53940d818c03d7de2ffe56050647 100644 --- a/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png +++ b/images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:343dcd9a2fc80306e837cfebd5665921e0f394bebea246d56ab0250095c9a363 -size 1146462 +oid sha256:8c732f20d29f59a3901adb383234ee44459300b3c8bd002aa4c1dcce0de9bb7c +size 1499359 diff --git a/images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png b/images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png index decf2a6e9e0b4e2d86ab511d182b6cf02d8d429c..698e3af229c1632758cfe63d154b9fe2c30548c0 100644 --- a/images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png +++ b/images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:368617bdc27bebbd7c3d18f50732d769b47b0d40d21473169c57f359a5d9fe43 -size 259409 +oid sha256:876c7510b6fe8856c37042fd1c95edff670ac6ae1f4f5ad5c2a993f852ff94a5 +size 273534 diff --git a/images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png b/images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png index d56a942aac2efadcb218b03e0700eaa75048695e..177437ba14a251c5bf6b83fec2c0e5f1ce717da5 100644 --- a/images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png +++ b/images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b53176c1a47319c3349e2fe809398cf2fd81d07c2d9ce6bc1e809e60fb088ec4 -size 415971 +oid sha256:a4eb7831a4eb8a3583ed89a06d1dd21642c8213867196e1a56223b26295ae746 +size 636407 diff --git 
a/images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png b/images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png index 127f07c12c63ec950575939f9d0c55d86bbc2c99..8432763d2f0d34548cb21ee558cf4cfe5a330f11 100644 --- a/images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png +++ b/images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8881519458162dba239b6feb565919ccad68291aa02a575384171b25346dbea -size 1745644 +oid sha256:61e4c1ef9d5ec9be3ef4201ee8239b8da9916969f2bc32a1568ccec5be748006 +size 810217 diff --git a/images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png b/images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png index 074324f24887114c1f1e764ab1fa838a4d8905c2..358243298c60f029b122b45fd6703a5fd36149e6 100644 --- a/images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png +++ b/images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9af64e98bad19cc24f4e1f5a4eaa10ec6537dc52ded4bbb97cc245a41559040a -size 261861 +oid sha256:0ce22f8b38b32c2fef982146cd6609cb9d1e253d4b5df54eac9b093dcf69e9fb +size 195712 diff --git a/images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png b/images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png index 947c3eacf6a96b9ba2f85ad91ae10de0c235a9a6..7380aa92a3abf9f3cdff45ea83ba6aa1cc83711e 100644 --- a/images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png +++ b/images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68e85e67e77fc3a59abfb97f781d0d10cce385e8f198aaed01bad3197976c03d -size 265297 +oid sha256:8cdc5c32e717faf109a2221d16a9ba974650288800a2e30e7454d92f267f87f0 +size 265050 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png index f10f741adaad6b7a5b17efbe7eeab2e64091da19..c78c2ce4057afc427f1f14dbfbe1deeaeb007bb9 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0daa1e780f9ec02f6930d125b5e7bf263bb16b140c3dc3faa984541642e8dff0 -size 527296 +oid sha256:b3e5547d54e263bed506affc4283bf54c10f4bba39d593f015b69ce8ded58044 +size 518865 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png index 877254d9f87a792f3d5d0feafd71d5dcf7b4818d..48640f8c0704089d8579c03305ef4667ef1cc6dd 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:101ce55c8cc9e9497cb8d11954cd10e35f1d40dc49ffa33164b5c6dae89806e9 -size 515059 +oid sha256:731c0cb5d75868c60584d5d71b586b35fc0bdfd481a5b3c58b2110451f397948 +size 577162 diff --git 
a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png index c1e8d592449fc2c0d8909294d4e1fcd88a56f57a..3ab5986e1fc5fb39f20af733de7d61382bbbdc88 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c8b10e21da13c89e0f807a86cbf3e43d646f0d3aac6f625af88e33fc42eef88 -size 2398434 +oid sha256:1c3ff74c4d154d2d1fbeb52040eaaa5ef31785ce14cb951a3dc6e7a6e7ef0e00 +size 3209499 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png index fe2640adff7a79d4621396f97d7bc86eee3eb1bd..3805fd8b39abeeea7731ab90c9d5b9f03d323cba 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c84b6e46dce21045f9a3bdaf598367db34855130435d8662ec673eb657d71d89 -size 758523 +oid sha256:b5f167b819bf7e6d21b3c7ed7eee92e5d8287e5554bcf74c26cfe1f4880dae5c +size 766116 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png index 41d7a9f85bc6b5b3ef72e93bb59d23e5556d34f0..e694fe4f61e5feed91744e84956e2ab06f49f9fc 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:255660b4cd4207060fd8d1cf1021c4d10f76ceaa255f1fc022b9a4d36280e49f -size 3403516 +oid sha256:7f74a5bf60a3ce837d5f829ff6151a52d38e82dffb3239ee882f1af01626222f +size 3031165 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png index 2042fa1c2a9880fd4d05b4a8fe82b42ae14a3a59..9bf90bfba6f9f41d1e720b39a2b9c4a5a695c5ab 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5044a9a761c4133d6ea7286c1daeb977e31fe9ce20fca598fac3cdc322b044bc -size 757564 +oid sha256:a1082fa7f344e1abac16f3be423bb9e2d33dbafa32f2fe1f151ea09caa5b0aa3 +size 811920 diff --git a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png index 4687e9d353fbbf76f63f68cffbbf267f28ec8d51..a7c366885e7a02d0bf5215f62fe7d4f42c5dacdb 100644 --- a/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png +++ b/images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68e3bec79808650a1541aeac3c8101796e84dfcfec128f67cac151aee6780ba5 -size 605968 +oid sha256:dc15f7a3ea474bb87a5b1edb041c5092a0fb664a117acd9380385d0b9a197594 +size 607440 diff --git 
a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png index 0a34436f1e0cb0f66503487157a73965d6bf0afa..7a181c41ec7dee6030fc7f900f6d5e9576518240 100644 --- a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png +++ b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be5f84d96339f80d9dd03283048ba789055298ee1490167d5732eade969e453b -size 406245 +oid sha256:91352b5e5d9834076909bfa05fd0edad30f08b56a768c74307fca355d95bba23 +size 424047 diff --git a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png index a45d01fc1214c254c35029d88a08b1cd84c0d956..a600af87df24d07655d19b23fb63613e9173ed3b 100644 --- a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png +++ b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d61177ade698459331342e2e71f399c48fc62b3ffc83980ec1e3eb3cd057868 -size 412125 +oid sha256:004fafea9d73ebb22bad82c51d64b7067d02bbba9e223174721b5f48f0a59654 +size 425928 diff --git a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png index 445f328d4a03a025a797689941657bf1f3451bc4..b9cd79eaa0924154dcb6fb82786a52774990f6a2 100644 --- a/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png +++ b/images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cddbe6248e305d70193647406cfd8c2681234ae904239ca4f670af710ab8195d -size 1229965 +oid sha256:2b73d64de83138825bd5a2f79b2d57fc9377e00bbcc019f9d950d6b0d7544a95 +size 1359293 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png index fc5d00a607fee449c642017954e1bfff661b40d1..02595a1408f4261dc8a84f26799a02d085e38ffa 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:159aaa4f8f4a3b678fc7d14079bdd203fff7967ed741f9b6e4598fe68facdf2f -size 894663 +oid sha256:b7090191c50630c2e87f636a7c786ca4de26c5e621a7c34f2a9388f3941731ca +size 942217 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png index 6f104278c188c5f98554910dca6aa13be8730993..83eacffae78cbdc850acd077f52cf538d560daae 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9bd33c5a5f43c8c3bc4a74fd5f13c540fecb7ccb8b68eeb22172f7a218e8fbe -size 871639 +oid sha256:80367fb9b197d1aee9a3111135aa9355650c938f18af73771074aa435210a747 +size 884995 diff --git 
a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png index d2d117796002f4b36a92c221386a27fc956a2981..153324c746d5082573a037ec13ac4d9bdab36aa5 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edba565a4b15c8576283efb8f5afa5239e764aeb2634a21fcc6e7c843faf546a -size 469681 +oid sha256:535c9c02ac5c84d8c741509566db85a298cbe52e9cc4a582eeb0be9ff4bdcb97 +size 297944 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png index dfccaea4ba6cf08d47eb7e8e149a7ca755334ceb..908cb3697ca65edf6388c7a3d167cfaa97440993 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cec16a6e10660ff87d5eb60abb8e50fef5a927f636b554957c781d126ceb7af -size 885344 +oid sha256:d8cd62f2a7b7903658f5bb9c82b5ba5887057f1f168aa8ec4886bd852b76b3a0 +size 1151312 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png index 5b578cba09cb08afa7f41cdfbe492fdada010b85..9dc8be807e85d46a385b8d5da865e85e35a6607a 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc486c49677fb78a483c2ad36e1c710cef323a831db6c60197dc8dc7ab55aad3 -size 611883 +oid sha256:002cdb461972c6ddcb053e0092b4fced3bcf4c74e2b26d614869a43b2c10abb7 +size 1165430 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png index 18e5a6609b049cf08403a7770fb97fff8b700b5b..df06686ea273df505a07d0710e0dd361b44a7992 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a472f7361d4c9139eca77042cc6574824df8f9f1d704d4beb420f999ecbe4549 -size 858450 +oid sha256:e4f225406562481b3835d55f8b95e810ef8e8ec88126aa59ec3a2c6a17e6be61 +size 932606 diff --git a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png index 35c43b0b73521f7fbd5ea8ea25e48969b01b7fa4..c715cb94c3af227c490f3df1297e82f3312ab70c 100644 --- a/images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png +++ b/images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bcf641ac475b0f6dfbd5a5f1eb2dc96f372b165a2103f9424b40905a6a1d16e -size 874282 +oid sha256:a8b54541bf22334432fd7622880cd667993586885019ed4df6f1759d08dfd988 +size 1237157 diff --git 
a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png index 5550855926fc0b63775da2ecae312de4219bc9b1..f30bf485a6a56299ddc95031a2eba101ecb9576f 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a2f6d14e5120d57864fd38c1f5a2845da6a485e9ee8df8d3028cd5bb17efa02 -size 1050314 +oid sha256:d140ce1d88c2b51c4569c7c1c498f8ba85b7f40304270a813f8e3b387ec3237c +size 885924 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png index db86b588b51b727cf79ffdc8d96d0d1d15b1aeb1..187d64efab0dbe1a725b360c5042a15951b67e29 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01d7dc2b2495e4141eacc41eb872139d33256b589de620f5f207a7cc54af0fcf -size 807290 +oid sha256:59c9a1c987a89b63f647d1066ce76b5472b92b17c55572402a4b773621f0e0f9 +size 933631 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png index 35dfc411a9dab73273e0f187d1af8417354abe1c..fe182208ed4d2b3c995e6808101e9616f08c4224 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d5eebc08b24d864dc5b834de7d7981a2788570d596dc84d512dec18d12a389c -size 1073159 +oid sha256:5a23bb72cffbe36221f2c0b9b7a24141d7c9612d9b6e91fdaa3ceb002aac1527 +size 936646 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png index fa4eab5cdab1a044f585b8ce27a12f605f905a84..9a4f02eb38856a583af03939661d3fcc633afb63 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a99779a68747e43d7d66add4621babffe7867499096ee3d1046ae6fe6b50370 -size 1541532 +oid sha256:8a4f4c38bf6a8ed49716cf539deeebc96a9cf3f9baf1887b9e10b3ff46e48ec9 +size 704136 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png index 5317602a966e3be7acee386cfcf405c7e774014f..9d83bc5c6ce65e014d14ac733385eb803d39b14b 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:244764bb02b5eec0bd91dea4cf5e96a6fbc0dc7d8b03bed57de669f12646d6f7 -size 936507 +oid sha256:730ddb1b76b6db89fe61fb6976b6c9f5ab56da7a682b98bfc6f96f9a3e77bd7a +size 1132250 diff --git 
a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png index a6a082f9f572fe4a10ab00e8a9d2b7dc59b1bd35..3586eaa1cb4a726c19746dd1fba97bc6eb1464a9 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:412ba1c6fdb36680b9b1f475e2a8fcaa0b909cd7087bd3a610dc93bb21a1d629 -size 1194585 +oid sha256:a206b558a96064ff0604537dbebb529cb022068e7de6327a6b7193505e120d2a +size 1212682 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png index de136fe73bb9521fdc1024ab54ba7a0c3d629a48..de4208efbd196c40480fa508b2bd1ef92d439252 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09a2f7c4c66e9aa26215845fba3b51e6fbfa8de0d35aa5319c14988416155350 -size 1181641 +oid sha256:4875c1eec15a51bebd67c6f1665050ce28f80d62f50bccfedb3c2355257d6c1e +size 1518894 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png index 297f377596ace7aa2094b94c7c9d6c1a2bf1f9ea..3ed834f54002e49934f24627b4a3ca6ad9d33162 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1aeec74fdba4bd43d9d01ef25c099b904f78ed7a05edd34c04df735f01046291 -size 1216707 +oid sha256:f3b9e8c32e5eda198aacce53866180da9171459a75d52fb21ce3d20e9a3025b0 +size 1493336 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png index e213ff7b5a018de7fcb9aee4882666670cca9165..7af86a9b134d86a58216bb5aecf632ba412dcea2 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9172e156706e3583792cdf5bea7bd51e1d6b750c4f66ec4ef07c716f02702d4b -size 1265907 +oid sha256:9a027396d91b4033dd1904706b5b8195f2cbaf0756ea26018e1ee8f4dd750a38 +size 1249640 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png index b0d03ea171ed3cd868c00358f9c5f3571c95254b..31d923c4f61d84c157b67f6b7df49bf3af23e335 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45a1baa3e170b7aea31ad180abded33f0ac8fc09186dafe500fbd78abd805c3a -size 863669 +oid sha256:7cc09947bfd24313879a22520978b6a35bed0cef0da5cf957ae07cb73284738d +size 864876 diff --git 
a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png index 5d2303f251882f660d203ed8ee75d012573fd4be..dc8c5bdcc4f25f68f8bdcd9dd2552cdb11605c58 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23bfbc5674d3d09c1f6b4d76f359e1db3a92bf845893d0f4f1274560062fc8da -size 891210 +oid sha256:83584f7ea987dab3d6e76e0b81460bb3b86e365a2dbd8759edd88712d592325a +size 885034 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png index 80fe89a904d3b884b267aca14d1bdcd3cf11689e..412c1f8fc7fee3a13fff48c42251891a2775e859 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2283ce8aceaa3cf6f7c0f6eac534c4d87fb328ee50f9fb4fe9f0cf5b07fd1a83 -size 878293 +oid sha256:3484f5ee4401c59566d03cca85efaafd6dfdba8af0a88edfd0a663a5d0d7e36f +size 1240381 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png index 816a24f684de743de3630b3a6582f9558d2fde09..840d77d91a08a42df0b7231e4fe75dd75942b610 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfebdffc10ce9bcf16e77a5be65fd21398f1e61c09dbe90a39b4f919c66db011 -size 1203816 +oid sha256:589493883913b1f46fc8db7f891b913f64a9506e97a84f6978418a1beefac026 +size 1525034 diff --git a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png index 0c3e2b6a99429fa8beb591dfd896210a53aad373..0fda0b06e22ae33edb70bb1d368e0fde73e80e7e 100644 --- a/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png +++ b/images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32dbedc57c620adb39f19a38fddabe11bb11debc5ebb7356323b37b46cc15029 -size 1167558 +oid sha256:afa66fb421078685cd5a3cc8408674f2afd746372c1bd035f6177d47b4f8616c +size 1372197 diff --git a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png index 94861b0bf81681686b85f9732ca28cb4657eefbd..dfbb14b1d27f8e47ef9285abede00edc8c98687e 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b8bbd70746a20b19aa5c66a1511087e9e3a62c7e16c8b762ae531cad6403726 -size 3293061 +oid sha256:74a66c4e54ac62a680acfa5f6a16555e098fbf877876d4728eab9e751fd1f0b1 +size 2068464 diff --git 
a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png index 85c78dfbd74567c115ad554daecbab065566e22f..fca5782d1d55a3404cb639e23781cee61139c6ad 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81bd56cb488bb37192d283ff66a7cbd0ac6d567ba11510f4800a1029bcdb9a47 -size 5969867 +oid sha256:ac1d8429004ee6a602ea1914c10b333357ea90cb8937d492c57b3fdb4f8b1ce2 +size 1451451 diff --git a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png index 4689555f2c581834108643bd54a207edc8ee41b4..f8e7eed9ed8b7329f61acffaf14e8c4b5c8a2613 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4991025104f7e8eff712336ccdee2cf7cc2ca9933532c2664884dbfb490b7a15 -size 1323373 +oid sha256:0dd55a547adfeec4f3b16803860c8df0f15319eaae037f85f99e88ab1cdd396b +size 652618 diff --git a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png index 90b82a4dabeb375289bec4318c0de0852804a3e3..b2bd51afd579c7a29078bf88f8e10aeefe211fcb 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:427b01100d2949f0a697a0ea753464f55a85bf4cf17270f39fcfd1c37c4d7367 -size 1370855 +oid sha256:6023f719fb72a5506b7d86c0484fc092141523ab898502e5d5f3649b9d52ef5e +size 1395795 diff --git a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png index f2e4b06e66b45dec6c0d9595d476d08b1de4591b..3571c672ea8f37d82a1f20d9b15331ea3ff1ea44 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2573175f7ccca003e71ffe30c23b7799a16280792996b1451dc7af1ba6addaf -size 1221746 +oid sha256:90c3211c0b23e2ba5641380caa24801032ad10575a5bd7cb0e72083d7f4bf9bc +size 1589450 diff --git a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png index 8cf3be1e3fcca97158036471464fc98d27183787..0a48ed1a6848ccf193b7a97a1352b38a01feb7d7 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67332b919c56a3c44cfa19eeef67eb19665e18c3027cbf4871eb055583b7fe91 -size 3211308 +oid sha256:01f5c2f670c2b16de5b6571c0a0cfaa639b68992857be2d6b1658b97956ebaad +size 2176907 diff --git 
a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png index 746e94efde25e703b6c2af8595a4b50f21a35d85..4a05d0b1328bdba491334989db6d72a385675fa7 100644 --- a/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png +++ b/images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a94000e91c14489cadca1a5698187973dd06ba6a20aa8c3ef261b6676941755f -size 1370806 +oid sha256:82a3603ad8fc493340cb6cdaf3d2b46d658bbbcb799061d2e6d3a7c2871815dd +size 1730643 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png index 387c05b30b60f7dd533898ff35144e32c6c86a12..e4c6837d116e6036f1a596de77dd199ed20061f3 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5871e254b0976205d089f000785bc4940f1994336b32f10ea631871f544e9ad -size 1083906 +oid sha256:e04731bfd383ec2fac1b33b1946386225f7bfea8cbbac170fc5d50437efffb23 +size 1030808 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..73c6927013adf65cf2fd2e14014089c3556e07b6 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:3bbc2df65d7897c8d2fffa5dfaf619a5449ae0766ecdd85c7cbc2aa3728a453e +size 2110537 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..5b1d848782dccd0d19adfe8a45d04c60cdcaae6e 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:c70677732d9c8075348cf13757fa1cb727e480e06f5529e7903b843720b33352 +size 2110618 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png index 2f1df3b1647b121f508d791c4531cd27e429ae1a..88e30a63feaa1dcf99579a0cdc91ab64c30f73e5 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4dc631ec2b8425ccff705d1b398b8ea2c5718cd94902c747a7b258ea955cf0b9 -size 1826108 +oid sha256:2c4fba585b1d3735b8b24a68e36c861606c18a70af2975296098cac097dd57ad +size 1309183 diff --git 
a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..96ba3c1a628f917c95b9da9b2d6c0a12f7edb6b4 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:98aadd1b730442e5c8f2c9cfb9a1f964bf79851cbb11774d072d7c85e4a63275 +size 2105543 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png index 28ff9cb14fcbc31c4e6fcdf20c925f86a89d860e..f7a70093148d8b99e2c379155629215136024aa6 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b05578ca286a44ccc7d5d1a0a04dd12300925dd242df207a32998341fa467df9 -size 1750216 +oid sha256:6ecee2088cf61be5097f484dbd2da08a3a9d7fc861dfad3edd9667684a0caa51 +size 1778003 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png index 2fef5fa702e16143e7011f573c93548398131c6d..bdbc71d9215b3e5e78b653cd2b8b196cc90877b8 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48b8a58a5e21ad3b86e68a0f8e30d9b10d3022ec72544eeb9bdf040af26ab63e -size 2094585 +oid sha256:ad96c6badc104d82fdb97511f9c8b27ad98033670336cb86bf4988c45725b75f +size 1108145 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png index e72ac77c9b9ae64d89f11b3bb7553fa6659b52a4..2f6162599023c5fa5cee22d099472cf92aae6f3a 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab3e8c92cfe93328c22cee5fdc0aa0cbfe71c6215a2485a0fe2a0a2ec8c22f06 -size 2279969 +oid sha256:90cb3861001794644d09dd68c9e7e7a3418318bb2f1d364b63e545f65055b189 +size 1694075 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png index 1b02c6fe624666b8a8de58a3381054fd835c7ea1..4bf2c2c833b644a8c98acdf5e919ce04063c4095 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3078dcfb66056d651823a93197e12a7577d1cf131ed69d1f9e9862ab50613e5 -size 742244 +oid sha256:88b1d0396d6684f02f635044030d75d28a7c457777c451f0df041af0021d0cb0 +size 751843 diff --git 
a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png index b9d717c2fda36186c137a53bab2e2d8f216f303e..5b64bebb812b2cdc0d8db6266e2ea1163b59ed05 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19427fc06c54d405f0f5d38c040756b4cf393a997fb5f5de28834b204f81f679 -size 2278464 +oid sha256:79b03f9cca9411c46b33f136c55b9f9e60f4a61b6cca8938f96e1fe369b33a53 +size 1238686 diff --git a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png index 6eee453a413c56f6b07dbe79c19a9bd793db5328..99fc534d794e33784069fcfb64f84b38153dd0a1 100644 --- a/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png +++ b/images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f17c3686cb576c907212c3dfed176f59d1e94d3ec8113ed0aa04ce829c02ee86 -size 2100865 +oid sha256:58706a027911c32c6f14a8310aaa379bf4622b430dbac0ffd47e0e71f8c5c15c +size 1938574 diff --git a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png index 857a9f54e297e6932b63541b59f745ed05659589..b368b7c1337f8cb5343d5c47c00c4aa9ffe77ff5 100644 --- a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png +++ b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:055d0ce0a9cf6b2222f9c09f1c6c008f584e46e7713231c1d065ee5ac5abba38 -size 576887 +oid sha256:e2d2d5f0104faa626b06e61d7f75d6602e230964672cb11f83742e0d1d9f1e83 +size 694496 diff --git a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png index 65154ffb0d09796de73478c85bdfabc7d6923760..52535beb864f722da9bdb945ecf96ecb273dc2b8 100644 --- a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png +++ b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e2c2c7d2f0b9081a835242e1ded81b17a14e0040ab1197e2296307f242f35ef -size 1075179 +oid sha256:65a32bea47678ab1a8e57e26d30aa989ba6579cba2cde216f7857a675cab10c9 +size 1194087 diff --git a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png index 4c26063531b32cf2bed0cfea943bd59d2c884a6e..bd542e61890e157a591fe046d3bb054747e3e309 100644 --- a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png +++ b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dd4c40e228e295384035effa6308723059a03df85d0a9b335799494c5f0287d -size 1042893 +oid sha256:409d56fde319981c925d19876dea672ede146349827f47ab9f89343668f40a77 +size 716687 diff --git 
a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png index 4c26063531b32cf2bed0cfea943bd59d2c884a6e..7a20e07f041adf56af596483afce6428a20a8787 100644 --- a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png +++ b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dd4c40e228e295384035effa6308723059a03df85d0a9b335799494c5f0287d -size 1042893 +oid sha256:f6a9bb32d92e6d5bc1b3cd4c9353a8b15b7337a3b9bfd1475d8280f9efe82380 +size 867511 diff --git a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png index 4c26063531b32cf2bed0cfea943bd59d2c884a6e..99b53b5dae83712f2c32168170bcbeea095d68ee 100644 --- a/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png +++ b/images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dd4c40e228e295384035effa6308723059a03df85d0a9b335799494c5f0287d -size 1042893 +oid sha256:21898eadc9bbe65fc1567df8d6dfb257f7dc94577455842cb08838e13afe0bd3 +size 1465772 diff --git a/images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png b/images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png index dc16026b7bc5aa74b1ddb59f466233fd97f92016..223d8d55bfdbb1b506556052c29b0691c91ded16 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:678881595e39e11e9ee37d06dd641a98e67bda6b6fcc848463a70ed3892afca1 -size 945740 +oid sha256:03a6e50393db3bef5840176183929dc281998ea329782fcd19111f836d327a20 +size 693691 diff --git a/images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png b/images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png index 9e9c0f615ee01bb1eec48e36d620630e1d4a0663..f1a2a1a4a3bab161a5cd1df8aeac956f25d9ca57 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:691aaa3f6775cbd7c252f3378405d7646763ddb25052dc1a314987d68ee62393 -size 708152 +oid sha256:f89bf9dff2aabbc295fb7e16b9fabcc8e145510a799e928deeda7b2f79c9dbed +size 743011 diff --git a/images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png b/images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png index a9ceb6809d25272ec325370c380ab34d31174df2..feb94203bae830fc88df46b301274dbb17f0bf8a 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69d20398b5c6bbfa74674bf2e9e8fc6116298fba36c0e9bbda5901458ea2a510 -size 703746 +oid sha256:a8b7642817f8cb927bb358ebd137cf5b9bc35481911d5adf4f3f9d9f6d69a805 +size 643063 diff --git 
a/images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png b/images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png index 8eb19876e652ab55ca95aaf2dce1ffd88a9abcc4..f91825c9da4dac531ad5e2a36d59a67c9d43916e 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fdf9f6286c54fd7e5eeb27095ee0c5dca4e1768fbcdf4861d6d1ae7d6d7eb37 -size 666881 +oid sha256:ca4fa0db67f24dd37580b16a03f3548e10bc489964cffdde69e181c22a307b50 +size 693963 diff --git a/images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png b/images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png index 56748a5e223c253761bb932e5f997081f5b9b9da..804f59b1ca13d0755b15c921fd5b16f7c1dcc936 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2dd9f3a09b3d971215c6041e3f0564ccc05834f0b0d0658f0006a1bbe9d07857 -size 921219 +oid sha256:666053dbc4ece14677302cecf6d268b3960eec7704dd5b7b6d67ef9e69b7c2f4 +size 1065418 diff --git a/images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png b/images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png index 5a117f675e3d0d8121bff59c2d672137292b0ed0..ea309b4c63b7f47943a60d07850b33c7f884fa7d 100644 --- a/images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png +++ b/images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bcdcef238d5a9c2c50556f338d9cb3e5017957de706fe6c293c719bdda843e6 -size 728157 +oid sha256:cb5405aaaca708f8eda5578198badc0ebf81d3a5c2d0ea5b02ad7139b294516f +size 908415 diff --git a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png index 54be35c78e36a034a1ffb32d3edb3bd355edb4f7..dfaae6e16b5b0e7b398b9651eee80bdfedcc84cd 100644 --- a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png +++ b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73621057499b8adc6ad95d5eeb16013bfa6bc58c8de2a69da64afbc30926bd25 -size 1849007 +oid sha256:6557e1ae9b2b655e76b31180dfaa85e5f8cba35750bef0cb155239c64753074c +size 782588 diff --git a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png index 10c3c24c455226b1b7cf5baa7213ea96e15113f9..b5837eceeb7a4b13c8743ffae96c56c10ac00cd5 100644 --- a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png +++ b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80ae3feda22976e98c1665929a465d2926038e92ae99919df2b341a8cbe185ee -size 992440 +oid sha256:18df6f007d47e81b8c48262ee7ccfbb3985717b1dd3a3ba6dc9bde86b4b4ed01 +size 1063289 diff --git 
a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png index f5a507e5b2fc1ba073472df05df57b1e0d26b610..6b6c4426fdd1c79a7aeaabfbda3951d554bd15f9 100644 --- a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png +++ b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d32203728a20796659af479962caa75722aebd895b1d520bfda4579b988c609b -size 1754037 +oid sha256:7c1f4dac9f069d0b402d3ae4b7f6c72471cc2b72d49feeb50e89c5348ed51a19 +size 1372508 diff --git a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png index f5a024f716ed3712df736e19a39c057177402efd..1fff23df6b746b029c72338cb25ee0605967dcfb 100644 --- a/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png +++ b/images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c37b22ae92b87adc6a2382ada66f1e5b0fd60a21f0fafb85103d43997769660b -size 1415617 +oid sha256:31abaff49c7bc4e591900335a7809eeff6ddc06b2a4e989c46796a89f88122eb +size 1962074 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png index 860edfc51a6babfafe03ad1ce7f470b60fc78cbd..f70328f420d36c10b65cd5019f3c2fa677fbe5c3 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c90430409b7ab3e14be40215311603a9d3832cd663c04b1414537955d2a9117 -size 1293992 +oid sha256:763a846af513258f9d0dc389872326fec7ddce4a6a546b1df89cd5eee989e389 +size 1756665 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png index 34be18a04f8f3495a745ae9911872e7c7c3aa925..4f2b27954d6f1e8bff7aed0aac0f7e8264ffe8f0 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47df4cb41ccfeda1803a4c7710ca13f225b822e8ca437c14ca6e23b01a8a3665 -size 540667 +oid sha256:6001ed2e37f9b49db4b94b3e751433040368a1a0e9cb33022ead1a04c4f0b9dd +size 457594 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png index f4464452a80981eb5f5379bd2642afb67f80d0ba..0d700d08c01d70340e0107465f837e8367b89429 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00d51516bce3bca717bf6b54865dda73843d08502e7ba005ccd9f988f05c7467 -size 326814 +oid sha256:9a1635d3cea9c70cecaf5e8076eae07a1047c51bb6be655a687f2ddec228d6f4 +size 363502 diff --git 
a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png index bd92c23edfafe9998ce91dae2208c3cfd14c855b..4a41467e14d4b50441ae5b1431edf2cd6929527e 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09a3ddbf2cb1e6c1ff3cf733b1b300805a1b7345590eddc510bbb7c2af0494fc -size 1390245 +oid sha256:ea86bb34dc780d8694bfc778ae951f55f312bb75010dd726e929772425a44c5c +size 1643773 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png index afe546c397be74cd0811405fd59893fa500f096d..53510d046e7a432b48f20159dd7c1d071bc96aaa 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70ff6ae0f4943e85439ce20016ad30dd97ed1ef6fbdeb6a46c24223fdf408ead -size 566227 +oid sha256:8f8873a9ece4e3770b4b443d4485e96d0bf3e25c97baa8b74697d56af34a2e67 +size 674160 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png index 95505315c2184cbd0585bd2384cbf89c246b1818..abcf114d94c1cfa8a363135e78ea0c4f9c3269de 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6929b88a271c0c2b3ed81da37cc54350f30fc128c0bdcd564188a5e88d2e9a5b -size 567473 +oid sha256:a17969a93cd82caff63903c6f760bbd3b8ae13d251ca756b01589a66ffef02f6 +size 528282 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png index de858242ed6fe25f7a6d231722618cec4b361e2a..2afa647c0ca5210971f08c958c570d365244be0c 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9ce1c0474a2b823cc63bbb42435df08fefd121bb1a15bc648630da2faec279f -size 325738 +oid sha256:e4f8876a008d2e250df53142ef4ecfa7afa7f7f84a6a3c01991b3f70d0b04d41 +size 325096 diff --git a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png index 02e32131894d6f5604a52abcf345a708c14f3f6b..c313deb2016bfeda5fa77e79953f4f4470dcbf93 100644 --- a/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png +++ b/images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75d8d040a69bc7b6b6d1464340524fdc7a809cc9e1d76aff36c47e14215d256e -size 1797091 +oid sha256:e22afe2e7e3dc03505880b57cc11ec6a2f03a60ffbd9f190771143df71a1bbbe +size 945066 diff --git 
a/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png b/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png index 13305d4e18c1a2c5afadd21473bac3754183fca1..1f9b69605955a93cf07fd4a822316b17b8529647 100644 --- a/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png +++ b/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf42888146ea055c38ba31f99ed48e9dfee582b39017c5f52022718af1c3444a -size 876747 +oid sha256:4fe8d31cb906fb246ed2f2c9f5870ebdfa4c5350eb4e1f44043bbcf84f955bbe +size 874622 diff --git a/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png b/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png index f798d29655b42701c7429553c177453c50f50828..35e7cce1d798a6c362202a17642ac6e5c59197a2 100644 --- a/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png +++ b/images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc097e4f5862c8fe3264367c811c5c3d1c76f90d29b34aa45b2a544f8e197cbf -size 866342 +oid sha256:8e151f6c8bf237cdbdb59e4cd2a35e4f9c5a130838ceedbf7cbd33693d66ebfe +size 631278 diff --git a/images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png b/images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png index a218f0262dc312352eb479c7a3d0fc03ab22b492..26395a70926643d842090558c1523d60d5236fae 100644 --- a/images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png +++ b/images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35824a70dcbf2f4245006504bc66f4366261a8938953e489deba19cddf0606e0 -size 1081318 +oid sha256:3404cf869f88930d5655267104839b59f736464a106f7e85d3930841b519d2f3 +size 1601589 diff --git a/images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png b/images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png index d6b279fc1692670189182267828ee1fe25eece20..0a1362142910ec555f4e574cbc4160ed5a7a0c5a 100644 --- a/images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png +++ b/images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16a3b56c8367a012dd2a7093b7def424f935ea5afbb7caaae72bf45030c848c4 -size 768175 +oid sha256:d55bde591eab13daff306690bbc202691f3dcfa2cc190c6d106725006422cb0e +size 768631 diff --git a/images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png b/images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png index e34216a09d4b3e33a5a58545df4d2b704034f82b..b03d969a43862c970ad5a8d3b56e164dfe769792 100644 --- a/images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png +++ b/images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:516f2c12605bb5f35abd04ae9fb77fd17a31d8bfecbbd13ef1de12679adf2a85 -size 1385284 +oid sha256:3baa78f93d5dfe8b40babd0fc2016250cda23b720fd055435d008c02fbbc88b1 +size 1256205 diff --git 
a/images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png b/images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png index b6043533d6829830ba0d65db65f21751c732863f..d1e979354d96dc01a9a89ab0d0a76a963b9f3fcb 100644 --- a/images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png +++ b/images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ebc4c1ad5cd2b6d8abeac767e235bcc6234949ba3aca951908d0947f1db4e41 -size 715071 +oid sha256:92bc32aefa46b2260be68bfff3b5cd83d00dcc001eff6183fe1c0759e9bd88b3 +size 1384909 diff --git a/images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png b/images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png index a1cdc90760c305324f8e9f4a670ad552f092e790..11e54ad89415c7ab15e52707c6fc01e54e4d687a 100644 --- a/images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png +++ b/images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9016937213722ca0824c42357aed7a1fb4a3a1af2d2c68a4c67b4c5bbab1f4a2 -size 1069778 +oid sha256:2520e1fd455cc65d796fe0058a235f35b77917ef16febe4cd085bb61b9ccd5d4 +size 1492805 diff --git a/images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png b/images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png index 7a700f4a4cd468a650d996d119c77efc1f023810..2b75062123ecc04fe8423de5f2c9bcc22acc208e 100644 --- a/images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png +++ b/images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d47ab7ece77308cd09d928f12856c18aa545ce37e75c6adb63c9ed293c23e5a4 -size 1333604 +oid sha256:ad2345c9517749a1231528cd0c8f47d75d53c37071cc2da1fa23480c34376aa1 +size 1590297 diff --git a/images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png b/images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png index 3a6108e2fc408fc2fd486512ad9e567b14959da9..bb835ab6272a53298a46b25883db7ecc3fb39f5a 100644 --- a/images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png +++ b/images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd75f98d48589a94dba40c321c7bad34122cbb5c33b3c2d7d5e47ab37f8b7a70 -size 1241522 +oid sha256:6a4c41edc94e88f7057dbb5bde32bac7f5a741137a4c183c6ecc6e96208b554a +size 1604351 diff --git a/images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png b/images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png index 5b9212f18c8f17ad9482f8452635bbf79d469e28..ba58bc201d2404269526a5c19fdd1da9757bb9e5 100644 --- a/images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png +++ b/images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e90d35882b33079d673d4af8e83c5330f8db3799a39b8eb10b8d3d14cb2634d5 -size 846802 +oid sha256:2f318864ce7235a8c73e4bd91e26922446df3b45777a82e5e16fd21185e0954c +size 780642 diff --git 
a/images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png b/images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png index 770558fa0d045537a2d8c808c3a1fdf03412e944..aa5881582d680fe99c699f01044df89996e4424c 100644 --- a/images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png +++ b/images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a03ca848a2aef8cded8b0c990830ac2d1f19da4a4d1093ea838eebf252d3e86a -size 453690 +oid sha256:a93ff254344337594c7fc13e46478063161efe96793de5d6926e168b5f171da8 +size 508531 diff --git a/images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png b/images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png index 32abc9b26efc07fb00442ec69669e9b11550f88a..07daedcdc8f50cee8d2fc8a5acfa024d4ac32d9f 100644 --- a/images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png +++ b/images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe0d519747c7480274e43ddce1cb60402220677d11939299289d017716801cb1 -size 411411 +oid sha256:625c3104221f71f67d2d040451c95defb80c7a325c7a3c50994c91edb9631285 +size 462515 diff --git a/images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png b/images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png index 6b4da34eb4093621ace3a76364c524e756db3b2c..141fdaa8a686fdb551e3d228188d47b761a8de3f 100644 --- a/images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png +++ b/images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33d11df450aeb1878be78f38b18e5ae20a6119ec379f8b647f7294f8319a7d54 -size 462734 +oid sha256:f12df2adf6e5bfef53e4633aa9534dca52d73c80732dae4f256a04b355fe9208 +size 513923 diff --git a/images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png index fc6e6e745cfc50a0599f350a16cbcd2b9f8e908e..8adaaabac6d424e1473be1de6e782631f77ccd50 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f55cc2d689a46dbd5a7ebc0398353b1c702552ac4eab2a02db8535df4c449831 -size 1816071 +oid sha256:1f13d81d22e5feb165d3050fab45728e36e565b59206a2987f8ef24096a2c8d5 +size 1793920 diff --git a/images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png index de8cd4e29b4e49a9c91ca025af95e8dcb3819381..3321ff69ce10d2d9414a7903f7ab8cc44a316680 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:082170e8cb9f8ea980501f9508d48ce1eef49f93374303042116fb27284aec5f -size 1067371 +oid sha256:2423a2fe7e02c9ac799aa13bfefb80f60f29787ff3a065fdb670592a1b0aee54 +size 997214 diff --git 
a/images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png index f54afefbe30d3a652fedcedf5c234250c4a4aa3b..1f63acc741069ebbfeb651257df332382cd3d338 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:786656f976a159b94b8850be2b68144ee67f6a4bd19b484dac9fc20000791459 -size 1004765 +oid sha256:fc924567f25f639ca8a618b71ca731e4a5579b8124887ab2129c2020eda4f80f +size 1078008 diff --git a/images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png index 4f9b670de37fb2cc75080586749fa5ae622f0480..be502c8128f2c85b8d92bf01a5089e68e559bada 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b0a1eecee5fd3d678f681bf13dc5fa0c3ce09d59b75e958f1e389b5392a16b4 -size 1403049 +oid sha256:141458e9d58e1927d65a1f46bcdac1c834d7422117e0bea6155e04716b48d01f +size 1424938 diff --git a/images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png index 16105eab2b2161f760d81cedb0f174659c3376da..a81a7a47405be3f7225bb72c25a015eacbd72090 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:606f90edec5a26ed7b90cf6c49ce83297ea4ecfabdfb8b4fc937aa0017274607 -size 549540 +oid sha256:2e3d699c6ad8496d09839d08ff320f99aefc302dde3d734ea16eb50a2134732c +size 525226 diff --git a/images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png b/images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png index 88b6238722de87cad2e6a38e18636cf2e23f3bb6..548b2f3e0d92f184da984f846362c36cca162544 100644 --- a/images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png +++ b/images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:511c4ac0458c7a3fc78a3e6b6f7e9df74a6172cc60cfbbfb2257997545192ff1 -size 997496 +oid sha256:7d40ae18625f706d03ba81232963ad60659772339c1ebc15a735e405dd01f371 +size 747891 diff --git a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png index f17ac23f7c5afbf20b212d8d7989d45c792efe85..dde66edc9a30149981914dce59799af43e735671 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e1158541a3992f493574427f8bdf0bba86a974585b09d1989d46be04986fcd1 -size 1474482 +oid sha256:26e5b47270cc67d773978d0be8849163b16e71acd43bc493ace7b8d613295043 +size 1940174 diff --git 
a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png index 235152d31f0075ee6dac7dea28682d5122d61cd9..626a621d1fe19a76860924cd2daf5fd03bce6090 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d2304e9eb7e55e53677f9d6fab0e062f3290ff5edba40836bcd1676f3a98948 -size 259636 +oid sha256:5017ddf6e3c60197cd89983ff77556dae415b0d5e4d5656352886f9a87661932 +size 208418 diff --git a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png index 643429275aa3cf18aa2fa518d051739d46dc092b..71440195a5bf2f03b9cc8687b3603885088c89f6 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fb2fe7fc626c87e4c482618f6f25af538e4d476ecaf0d56a8cb4785daf9f34a -size 321363 +oid sha256:648c899c9464a66adf3e384fb6a67db0e937b38d31c6a5f7b0a666bb5c8e2cd9 +size 332922 diff --git a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png index daa71dcbc1778627eb20b6cb1ede307ba30a16d4..f7730dc98c745f2bf2eee04a8d3aaa140fe0c07f 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88c5928902aabdb567f53af06a07c9bd85950a4a03b2e036bdcb21397d00b96c -size 925729 +oid sha256:29b891475a74ca196b59fa495b76b4976a75deb57f2e3a899ad5971d7abab23e +size 601858 diff --git a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png index 89006f75674be09d02170dee7cdd455195358a4f..ce41ffa3a682c185a1b7e97c84fb1111f81fd09a 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b5f60414bb821d78c5497c7ee5e52909941798c5572600a792a6dda2a02bce0 -size 1047688 +oid sha256:5c6d393c4f1498925d7a0eb1a0a0a8cd96112842d475b4088ce941c5a62b8314 +size 772128 diff --git a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png index a57c9c818eda710d07539b4e2f9c9a6eb1a57f3a..f914816abecae0b805a664c5ca10bdbdc7c106de 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53314eae86878fed751dda63ccdf6ca68077b24736994f1494c6e4cb2ad79829 -size 1914504 +oid sha256:2770203c1f7dbee447de7419e8b593a316e16781e7241a8f24e2fad1db510e03 +size 554201 diff --git 
a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png index d8e8f27481af93cd7eba06bfe00356add91a2c45..734e13172c0d4f3e1a4842939991205a598c7ddb 100644 --- a/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png +++ b/images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd8842895c70755b87820148f48cef706c65a0cb84648c4f1b74a379d216fd9b -size 1172718 +oid sha256:baaaa7910a7eddb6eadd2b1084465b3f72e0b103c6fba6bbe786e16c6e332b02 +size 616831 diff --git a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png index 1d9920a8f194127834f34428a74e09a24f32856f..fa01c851d847bccc1f7ca0294267945591fc83d4 100644 --- a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png +++ b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f3c8343fac0208aae499baa2d0fbf604a8aa818b5ba71e21aa5a60284989ff1 -size 314402 +oid sha256:9699b6cb4025a27976482138fb1df08a6ec49b7d78ebae51b0fb30d3356e3c61 +size 326939 diff --git a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png index d4d0472294f2b6f937b0038149e6fd759ce9bb13..b9a334733375f0c11c1acf192052452dca98ff62 100644 --- a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png +++ b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84cd84a25976d7aa2a4c3624d39c3387e00022ce48414a99a9013fb2e86c6ee5 -size 1343822 +oid sha256:ac8b08b04204991c956d4313131ff37b15fab1cff48c5b6a2af7abfa8edcbb6a +size 1836372 diff --git a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png index 5e787a4c60d9e97033737f5edc65e79f8d3480fe..3c2273c3d57c0bd4c1b2ff6df376a6b27b4172e7 100644 --- a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png +++ b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bb5e3c5bf59c50987071511d51c239702c9bc9e386100bb0c066b4f0d607765 -size 4285349 +oid sha256:690e1e530a6f8ca747fd7f4e712bd220f85b2fda1b088a75ffcda8bd362bcb8d +size 797903 diff --git a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png index 27212ddb018d480a3d113bbef9560415be387d38..b0c525f81debabb7c43d02408a158c72aced3b95 100644 --- a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png +++ b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac9eebb2eb4b472e2bbf175b8fbae0653ee0b2e612b2b58e0b8dbd34e8a7a9c6 -size 305802 +oid sha256:a24251ca85217ff6534ba6c83d0b3f353b5437118b85fa8b4841802bf38c266d +size 329225 diff --git 
a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png index 7de91c240d4b18694ce715bb41008d4c30cd45ee..86af2613fddcd3c074839429d4d22c01aa5f6ccc 100644 --- a/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png +++ b/images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdaa68ff3b250f19a2bd9c28bc34b26af806fee4b97498f585b5221f3d2554c6 -size 415455 +oid sha256:3f2df0699ee321f5a97ff8cf709af1e774a66991cccd1b876209db0ef97de240 +size 476512 diff --git a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png index 388cc00fe2078fbb59b03773a4658740adf08f67..51c805d31ce1d471b711bac5fa58d2b3c583a2d1 100644 --- a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png +++ b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c44dc31d6448f28718c68936651c648c6e9c555cb271ddb6714e79448175ea7b -size 852067 +oid sha256:9319dc7f194b613ec85f3184f6d84c70888ca257f21b2366490b089acd0f4892 +size 883667 diff --git a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png index ffa2a4dcd4b9c9971cf9cfbf08edd7078db77b98..5b22a524be360c412adc5f598287af8f9dc91931 100644 --- a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png +++ b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb47036b3a6f84df79f5fd428ff55a5bac05d1c1dd533fc2f973e3c19d89af06 -size 893761 +oid sha256:c17f2cd274e8b4b049d4c85aa39e30df0b92ac35fb705c11df06cdc06367376d +size 934212 diff --git a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png index 953c4322f0b41909223308881be6021b2fd931ba..278b3fcfc3f9a788f4135ad0b051fa4eb5e60224 100644 --- a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png +++ b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c073399cb53f5cb3b5a000b0e4b284d5125df058a8bea1abd4329882b276c647 -size 943738 +oid sha256:3737ac696cf161ff068c6d055c6f6d87139bfa362a6ca05001e43bd4bb27e82e +size 504496 diff --git a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png index a6eccd39030814867dbc116cbb187b3db183ec02..96167f91ad16a19873f87dd7ed8ec470aec318db 100644 --- a/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png +++ b/images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d078cc0810bdae4ab60f38eae4055751e6447558d195923d471f786c8039cd1 -size 945057 +oid sha256:1094c01672a01398fd2449b1d7df82b1df615d162d7ec6a600e3f4a7819d80f8 +size 942575 diff --git 
a/images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png b/images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png index b8fe1c3868b399ae5626abd5a908831c566cceb5..9ecc7d4c82bc5529900c751638bcad3c07aa559f 100644 --- a/images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png +++ b/images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:483ff6f707b3730d4e030614191cbbd651dbba4a61d42119a9f66c7d5221d1af -size 1527694 +oid sha256:284b7520e91b4913a4ce3ab9aa02b5ff94f1d2874f0bef27646771597a8e2564 +size 1900478 diff --git a/images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png b/images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png index e01284b7a9a438727aabb20655fd9c3ef7a0180c..0b94a2e7d4b367b6b8b79e2e58c1003c11eefc0a 100644 --- a/images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png +++ b/images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50f593d47b202a9dbeb7f1abe96cc80451efd7182a53122cc78a774c90756b6b -size 3619752 +oid sha256:8df8121801fb5cfc79199225c5327b719cd67a58c0f5faa9c28ebbfa6d3202c3 +size 2436367 diff --git a/images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png b/images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png index 09e9c752751a00dfe8f555d4a6630301177fa5b7..fca71932ccb3a2f1ccc3002b1ad9563d34082044 100644 --- a/images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png +++ b/images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e47ef92417e91bae25ee732d75b7e8a2b2fad665f90e9079440a3b2bc8e964a6 -size 3592945 +oid sha256:7fa2fd565c3b38fe5533183f21dce6e7eb7e2d712cba3b1ffc1bb18578a7cd58 +size 4051961 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png index 3cb271926915fb4e05ef7e4896e26af3e7e00842..728b2ccc4182a5bca9d291811d576eea59662f9d 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11cf4863656f5433f87a73c693c99ff0e87aec72cf190150d2297d5fdc0a79d3 -size 1139289 +oid sha256:2cbbae2db67da36c9cf3f02619ecedc232e24390edf7fa4212adb00cceb59f81 +size 752564 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png index 1b5b4489a010ed606a011362dc9bf62224a15e12..6368d0cf051a36ba954ae68837b8e86b062fcdb6 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9ef94f409dd1042e383c634c37bed7a8a5ee8bc4c580892a1c971540cdb9179 -size 556214 +oid sha256:4f96e2bfb6c5053b70b6496a4d99d45c59e6717311be3b40d07133f5dcec432e +size 707365 diff --git 
a/images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png index 1ed8ab6a1dc4707a066a054720db93ec000b7286..28f82a8428522a33c3e0497f96df9ba1a1f8cf67 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea3080a070e818ba82cb3e6888a24779f3cc21c28b93d5902db7b2e803c06941 -size 1015337 +oid sha256:e5bb9c4af0ceae97cd1087630aee39c5501184bc6693f69a8f86f71842066c1d +size 1239098 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png index e100502a07ab169f3e4eea07fe39495a04b49fb7..8aaecb9d24020ffc3ca840fb65d1b387d7fa2047 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c4452e4f3195b74a29974da535fa0c783a9dec6b594d34fb871a057fbbb71c1 -size 809271 +oid sha256:33359e8d7defdc95baf9963ef25ed83b8e48bf1517e6e3fd4b3850ffdb07fcf7 +size 1018735 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png index 5d9862c85935c72f13c51658455662e848f8cd22..970ed4c478386c985ce9328747e46457f27a9699 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f58dd142ce99ca24b2218833032e9025b2ae9118f50f084e96eb47f751c489c -size 859346 +oid sha256:b01fe3bcae0a76233883bebaf14e71e22dca134cccd4bc27b049fb2b39b303b7 +size 814634 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png index 1fbdbb9b27c11bb12e2b4c4d6d8ea744d259ec00..1339ac20773ceb8de6c57d80901fd03a40470814 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31a4dca89fd02f68935ef56e3ea18381f10ccbf3da9504bd3eafd498ce061a0a -size 754985 +oid sha256:2ac28b1eab6b8dcca7afbf932ee7381422e50f3ad91124bcbc64a78f35cb7cec +size 555706 diff --git a/images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png b/images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png index 194225de81c27358f9cdd3da599864c351c08a96..9475d515c7800c2bff0da455ba40708a6a08cdd1 100644 --- a/images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png +++ b/images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd709ae77ef4ac1142d8c5feb80110ebacd7f8da160ee173e34d59b41470622c -size 824376 +oid sha256:33c277b37f7f307d798c55f7d2453e253a7037df23aac95b28974328c522a405 +size 917586 diff --git 
a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png index f31224043e96336d9b0545c1a48c91a05b18a9c8..b105e7ec8237a91adc37727d8482a3a5242abc22 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fef317e8a71579638fe34f3198fa5695a29eeb8dd4b5d79344aa8f710244bccd -size 550817 +oid sha256:2ee229ad929802ea37854957c860f0756350d366557b0ea37a7e9e6ddb5d4af6 +size 652214 diff --git a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png index 517e61212e5a4dc5349fe19c33f99315a57d6771..0c67abb9529369783e7d811f3c0cc167688983b1 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb65f217e5911a3eff449ae62f18343036c20c7ebd1d60526c0b2ab5e9ba3193 -size 661213 +oid sha256:48f7bd65e397257f0d2a70f981927d66781dd14e9859d138b27f6be4abeae6b1 +size 497970 diff --git a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png index 3a220a22ab207513935a214b4cd114a7dc5c0650..22e6e053dfa9caf56bd8ec5a96dd5591d1c027d4 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:168212e08ddc14ab649d1e43168ad3e8ac3f4d293870359824991acc570353fe -size 646323 +oid sha256:a0991f3bc2a59267224c0570d6cfaf3c2197d54d2c277fad0433d8ee5e04c25c +size 583353 diff --git a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png index 7ba07a097ccdc3c1285005a1677b0414e02f7b1d..c6b0823e37c37c921092d1b0039eff4b440f9384 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6443741f162f723f8f8e4a6b8991d01a2099388fd8571d4710ee860092f868e -size 637231 +oid sha256:5c09595c2af1210e93413b8c0e74461e46862a1ca82859d08d8c7d7dca83d6a8 +size 710503 diff --git a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png index 9f0951b39d5ee7f09c7266ba7491976d78e3f85f..5d322e88fc53e5d275ad9e031debde4502326f75 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb3721dbdc95d2c686ec647e3f8e9d3be37452f2b56133df272d86bbbabf73be -size 645371 +oid sha256:c06158587484543216bb413db0dea562daa0d6c3a8adb12cc3bd1a38fabe797d +size 713392 diff --git 
a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png index 62e14e1a84077d4a8d43906d97c0660c3ed7ca60..64a8d337852b1ca82ea7e25fb09eca27cc621bd5 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49a63eb9d2dc1bee091617b42a54a17c00a4589405ad1cfbe414873a37eb877e -size 538660 +oid sha256:11c30378b75be51060c2ec453bfaad887bb07ac3f9fb08c798ae414796a2fc2c +size 620686 diff --git a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png index a9f3f70525eadf52aa532275bc78515997b4a7f6..cd6277d5249d51a68a3967c178db48a0a00337ee 100644 --- a/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png +++ b/images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:966874326abe24d94050bf91d6aaf25448847e7d4b81ac2a07c4abfb049437e5 -size 645681 +oid sha256:ba07a3360189e3b7b0d0e293c724e23a72ca3f7a06217b012ba5db76067135ad +size 645415 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png index bfe630ce79b6adadcdc33def8308c95f1d23b2d7..9c6ec0480d7c25f55b9f49b3b3db6ae6dd2bd4d6 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb4a115739f4b94387c498786ff9d73082706e9a1f87dca28eed38ae2d75257e -size 146818 +oid sha256:6aba27f2db1b8945952c50eaf89a6472ad155e26c404c92a6e74c7b2b0c8dd2e +size 146642 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png index 25ffc8a6e5458e9fb205b0476aae55a17a975d5d..c0eb8332e0d33fed54778048dc4f2bb23dd66b85 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43e1e216be24cdd921994a5bbf0264105d43a1bdd625b6e9dd3da0f3921ffd65 -size 376370 +oid sha256:e31ba4d6960247c1f66eb27341c62c725e5334102401f9aefc3a1172b79013c9 +size 395918 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png index 45002b2e3b4a0eb5bf89fb19d1b1815a17cc6d06..dc182bc2ff238d529a7c7d83d5a589af837f2601 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49dc9111d39ce5e52b5259c42f80f67b5305d97fa486cb18f0fea2d1fbf09af5 -size 1496500 +oid sha256:2ad431a62be9ebca298bbe4848f992eeff953e82576620effc4051548fdc55c1 +size 1797094 diff --git 
a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png index d0c3c44caaa01b654c853f7df5f4a18a737d3139..7104afaea4442cf882a20d53e572ad31ba826837 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:583ca5cfc7d89afb5bff2a923f919288dc2033d7fd82a37aa50d7909bbd7dcee -size 1564412 +oid sha256:53e2ccf55d7cf5c2cce5a2fad62420d22022fc34c4881efb97e8205679f47bf2 +size 1379517 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png index a65740c3361f2be5fd8c0f9f35136870333b0c7e..fb816972b71d6ae39708c7f5d25419fbed25c961 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f79ee729369074958bdc2d9cefbfa380975f1de8474ffee71f9505e3d1494f65 -size 1531485 +oid sha256:8f72c9f73d5358fce58685034595f1763320ee8c026fd4745ca43c1819827912 +size 1042250 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png index 5fe54407a9eabd3c01548a2629ece0a8f4916775..f46d009a91e94e86cc17975835f16ad0aa6f4bfc 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b149e29bad8b6092e7f0e3d57e77de9148de6153a385d4a55c2695b49797bf9b -size 678994 +oid sha256:7413b7655dc0c93a9f1abd04258ba2c8c35f2fbd32a080791e7cfa2a6b5646e3 +size 529524 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png index 1e64750fc201c0ec46d15e75430bb9bcc4bea12c..886153c368019b833eeb6530214b3e938667956c 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3b68214fdd47f2c6a3ec8ab80a951334ba3b5bc8ecd388214cb85e0159ccb11 -size 1565577 +oid sha256:5f37538d4523f75f8ae4e338549030e07fd2c195f843cf892a73b06b0f01e2cd +size 1774507 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png index 26c555fd2611efffcbb9fc2f11beb0d985bfb739..dd539d5c82679d503209bc2fe0a642fa68ab008e 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ab084313b3435ce4adb898b500f6c1ae438e715629fa29cc96c00be4519ab4e -size 1563624 +oid sha256:fc5352602ff233b6415646d73b5c7c2bf4507316dab99fc7079875bc5b01b0a2 +size 2003691 diff --git 
a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png index c38440c238105c790ee218505a49e447a3ed8867..914dfbaf27e803eb22ac59fc6530db4b7a29398b 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:553de2fbe951596d4a78a61df72662800f566eee87f672f526ecf697bc063155 -size 1503129 +oid sha256:6e06596a256fa5bcdddb994e64a71d9fed5ca03fb2ab911029a18f7839b007bf +size 1826806 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png index 799be1beea60f2ca0ed3f4170e4c09aa2780a5b0..73c6a2910b89e5821ec7a2e0499e6c4755327694 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38884e619396281896f4f7cfe731f6d2da1d320626d188d5dc079d12912eef7e -size 1496444 +oid sha256:a7cc92238bd880b67f50eab9d2713100eaf573bb36e1a1c43221f056b009b8e9 +size 2106720 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png index 9ef69fe260d9c19cd993ca8266a4c03fe31b70f0..7f8008f766e882bcaa6cff8fef06e23cef02814a 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d97204047f428fbd3e33e550052a2202e7921c57cc3f5c5d1ccd0fe679afb72 -size 1564131 +oid sha256:8d06186c5e5686bfc0703206444254a0fe088c408dbed7444ae32f02a30f10a8 +size 2453667 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png index 6289a6f881384ccb65b0f72e6ee0cf87c69d9c71..c1cf68391c6a6c62a1a11256c5e98c85ed0778af 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca40f88a29eb4d560d46f03d8632041883fd6e22043f3104cf23df2f7ab86bd7 -size 849713 +oid sha256:b1f2b30768393ee1d5c0bc4a06b2f8b6b6f0734d832dd1f5d0b8472f7abe94d1 +size 1002930 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png index b8a569d30937ce450cf4179d004eb7213b8f3189..b2e204e01756d16fd971c077e75d0f5d8f9d5d9e 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90c285c472257a5da58b9646e3a4a47ecea1fcd53347b809a2715e9b63c423e5 -size 1518724 +oid sha256:c4c923251c1b64ac812c6f2f6e54f4890899cdf08628f937a65658c12c504d06 +size 1511940 diff --git 
a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png index 34c6e240a4854b644d5711c3b6a1c02566e01611..1016cb53ac8b74fc659766ed6ea023ab078dff12 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3dec3a4e84417d92525d2ca4c6c5bb34749b978a91f16292db8cd4787103f7b0 -size 150549 +oid sha256:cb1b80de55b8b80c729d93ccaabd77ee518b131302ab7e5871c64f4f65abf047 +size 109323 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png index ac649e453b06e531f967f662f05119f09e94e63c..1bc1beadd98abc76ebb077b58dbaacb6045f7025 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91a0be95f97dd321f8614131cacf6afdbc3243e1e8c6d1554d47c7bd1e43b3b9 -size 1501099 +oid sha256:9417912c180cb8a9c6bc6eda1384875557278eadaefa4c10f63b245cfc7786dc +size 1254026 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png index c9ee4c7c55127c01d88235eff2b011ad152c32a2..a08b428b76811a79524def99843b7db3e23e0d42 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f9e428c49d001adf17f93edf905f823d7f1aea39f72d4ec41768ecfd8ac7b45 -size 93158 +oid sha256:4693734d104153d1c68a78c48e4d85c4dfcae4613a5482529ab894db9044b80b +size 94098 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png index 29b0b9692832f230843c0489ca77c0207ed4f188..ac8ac0c3a0b9d1c7ec039d9356fec9fe75a90241 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab3c7ddf7e2a201fdda098568814ee7d8dc2b23f8933f8d23a8a30ae0026e720 -size 383066 +oid sha256:5492575b015737e50b553cce985e751b3ea7a2b280be224a2d7ecc88928f96a9 +size 401519 diff --git a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png index 05eb7de089bbc5286d2fe6f1fb5bf2ecffa13c69..5d4063a6673f97becfa3620fae8034afe5dc1566 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06125f1d81ce9b06423aa7062bc83b33cbcfa0dc7486ff57dc4a37403ceb9878 -size 2655094 +oid sha256:b7264638448e93f6f3b7eedaf0cce6d0792a80304ae7130ccf0085a65c21e22c +size 357494 diff --git 
a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png index 91850cf2d0c966c3f89b62cf183d6743e5136d19..48517cc55b51def42ce951770d5345c61cdcb56b 100644 --- a/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png +++ b/images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22024699758ba8eb68db6b6a8183a0693967ee2b25c6ae37affc74bfa2611933 -size 1562666 +oid sha256:11142c484a6304d153f136901b76e509d1e062f89426c14622cd1c572152990f +size 1422648 diff --git a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png index 03b22fc51dfb697b9b8ac08f0e23072f009899eb..a9c7fc14e7a62cda7e02db26294d5a8800a0292d 100644 --- a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png +++ b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51d998c4a4734e670a8176d467115d4475a28fd7612b8e921584a2bf57208892 -size 2610770 +oid sha256:c1034355615e84cc03d836ba8586b66a0e20e2d793d5af8767939177b23bcc08 +size 1248413 diff --git a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png index b3cdf2fb234a67ad02aa1ec12a4d0385c7cc6983..82aab21ef60e78e904efe24fda85b10b1450dadb 100644 --- a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png +++ b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6556a4a8b0273f255d8821da86480b4d4629c6e6e606ab5063e133326284e2e5 -size 1972257 +oid sha256:4db621e084a702f2681f7509c7663c9d667bc6bc3c67e0bdbcc1f95d636dc672 +size 2074990 diff --git a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png index 8bd281984911e40ba52f539a35e9faddc4269d37..7d0500719945a3e1dcfe1d1a1ec6a352776d74d9 100644 --- a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png +++ b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ede8797c52655a42ffbaa485d3fabac42850a266d6683061f8b4384f759cf3dc -size 1607631 +oid sha256:c49d550643579a964e925afee72cff1e76a0d6e95e9fba12f5624becfc325470 +size 1706265 diff --git a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png index 5e48b96a8b2f3298409cad869cc6de4f87ee3db3..de236d0ca8e5d5d1f5f815721262985bf88a8001 100644 --- a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png +++ b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6747bf43f52ebdc5a90bed108f45fa20993922a70dffa42bfe11cbb388d2696c -size 1530945 +oid sha256:a399b1ec596c8c580be87f264bd5e6fa3960a252e51d1ee589b4528e8065afd0 +size 879058 diff --git 
a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png index c72dc9ee54d1de115b0fcc0ad2c55f02ee21d9fa..47aabbc3b1840f3bd3fdfdbc6794fc982e343f90 100644 --- a/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png +++ b/images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3069ea585bd97d27349f3dff2506cb982b9a70ce1c25ee3969fb8c03daac9afc -size 2081764 +oid sha256:ec22db9c9678fa03ba52f353e57a23fc6fa21fbd6f4cbfc011a21da20c15f92a +size 2031832 diff --git a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png index 1ef6cf77355ec6be498fd4a5786e0c545c60f6c5..b423615c00ee6b4807a61c6f3e9ae68cf01b59e5 100644 --- a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png +++ b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:780ff5a4d5b8aaa04c2ea5e99b44702f8d4bddd313a2e1b2d7c83fb52799b723 -size 796364 +oid sha256:a2e1f57e9bc982849faa3efd0f42a6c138825c9c06c7cd234401f07e5b4a25e3 +size 631934 diff --git a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png index 57be55214560f7b1cc9244f50b19663139bfaf2a..dfd5a001436608d4f9d3e1af54412dfa6a3a0d92 100644 --- a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png +++ b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0e3b6910f0da4b0467d16a894feff568c8386ab75f2407c45a9becd3bed050a -size 428064 +oid sha256:9833b07341bbf07c5e92c11bff5a191d9aa905247a0950e45c42cf09e954bc00 +size 819071 diff --git a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png index 3532061b45bb0fd3535956e5fdffaa121e1bd22a..2ac9fe88f047d5c01714927c5e745caa589681c5 100644 --- a/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png +++ b/images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a57a9a702d9766ea92de5fe59cea28e02ffdce21be55fb72e3246f2fbb6f352c -size 820104 +oid sha256:05dbd8a1df21c4a181d17ddb301c7db886346d2b3e68a6fe2e0af4744c407288 +size 1345005 diff --git a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png index e2b549547f961c2930086498249d247a5c37f1ec..47a96e330a69f095c4d19782070b2036cbf95c8a 100644 --- a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png +++ b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19680f9032f650f303d1df38214b816083a21f4eac41813d0c6870e43f8d78b1 -size 648525 +oid sha256:ffdc00ad20069f86a0cc32115f238a36c41fd65c6e8a56efdf0c9ea6e11a7d99 +size 600382 diff --git 
a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png index 9495ab193771e66a4d1b773e83772f9cab33ffc9..ef7410ce91745ef18b9549f89352f2d84187e7f5 100644 --- a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png +++ b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc27b42dce892436cfebab1b74e25e12574ff42f1761151ae53cbc3194cacc36 -size 2119137 +oid sha256:ad832beff8874e92f6dad0948b0043a8d12603e2b8502ce973f041333c1a85a5 +size 2120218 diff --git a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png index ca6a64f56b27c0e3c43ada1a044641956b61d557..6526835c363f92d394f214087b0bce4d3e94a795 100644 --- a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png +++ b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f40f776347095d91d031e6abfa4b5dd528bf6a26222db5eaa00b97bf6387190d -size 2252583 +oid sha256:0ccf15053b0eba8b9b9ad68a58526102073457ef2564a54dd0b89e7b6bd0d6d9 +size 1214410 diff --git a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png index b92aedd53807586b46fab6d35fdf1aa32a9c7caa..6de7effb25056591ba71d2eabdfae8ac9b9b2ff1 100644 --- a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png +++ b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d423eace7015fecd1ebd9346a078e0175a16a9c0e3bf0812f7bdfca532a46706 -size 1499390 +oid sha256:070b2e6457fa6eb5c1329a60f8e2caa5ee76da181ce0aace0ce4a744230f3dc0 +size 1681161 diff --git a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png index 3b047e98f1b2f286497890e49e07c8f14fb1a0e6..c1a20226810ad8a38cd392a5cf663f2bbdf46569 100644 --- a/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png +++ b/images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:677eb69fb16216be62c57e491135d231a7ea591d8690899b8b00e728ed799827 -size 618670 +oid sha256:840d9f65ae31ae3d07a3585268b3d932e37818319f7660e24af4d2741dab0e6e +size 1061353 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png index ebc61901f5e810a6eeca4d51138a9df96411324a..673957316029802fdd62af40438106d04144527d 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e60c487d7cdbfff65262ecc949bf31e643183d2611d64c63e15dc26607bdf7a4 -size 472405 +oid sha256:3ba04c7a3da47f28db9853ec9d6209a8346bdfef2e33e410b2b5814fd56f2388 +size 227295 diff --git 
a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png index ee523db1b14b8070c11b3cb594f6866149f3b2da..ea52f9299d237bf8affd495ebd375f014fdca620 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f67f89022f942672ce8e7c23c301431310ce4ee4dd49ed2b88223ba055255c46 -size 650625 +oid sha256:488ae85ae0d23809c65e736421430158a50f98a55fbc9e5068a4e6382ddf78a7 +size 530718 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png index 50265de3426c4e41504a0c5329e9e49ad756a98b..cc62377336886e557200133ab75cf32191789e05 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c900b85aa4b389f347b8c91a27f22d0929026c9147de8d2dde64296865d2aeb2 -size 547927 +oid sha256:9bc77203d34989a887cad8bc9b008f903e8fe99e8f6a4183f05540faaad3545e +size 375203 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png index 550c7abbb1439cb43c97977a9e0bca7b51873bad..603857a70069887db6d5cad11e2d54dcd7e65155 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b30787f0f9522af42406f7bd1d35b909f30a03b3e807bf56baab10c1dbea421 -size 641882 +oid sha256:51b734af86d77bc599d24be090977c65afc199ca2d584ab28e210fe97402f5df +size 204234 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png index 6e95d9aef290647e6e618971e201c1fe3c904e8e..f3ccd770369c99fb11f3afe0c657b6a2b8f69575 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50fe0532039984add7d916859806765111ffc960d428da55c608ff2baaad0c0a -size 431966 +oid sha256:fb7074cef3c26095dda6e39597a4007902a484bc9169a457bb6ae153361f916c +size 556420 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png index e1f443e5ec52a231c60a42ee249979ce41aec373..93962cd627cc43b8df8a571f348a48307104233d 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de0e465f246ba809a8300e657f5f92baacfab6ffe76ac03d7286b05a61438097 -size 748042 +oid sha256:b54c629f033f64dc5da273e711f0dc2c5830473bc3029c669c61bddd19211924 +size 821648 diff --git 
a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png index fb23ad5d1142190e9edec485ece5c0765963a627..9c02c6ccef9e548428c4100983c5142e44200268 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b4074143e3ce8a284635fb129ec8408b06bdc002a45c506d0f9f08cc6e54637 -size 643971 +oid sha256:fb0a2abee0544d7a217b16ae590530f30c6c6eec33e9a6897719be7754f6f197 +size 426052 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png index 04b759173f5d36417c3a9cf5f330a2fc6364e5d2..2a9bf1d8547c39b915bc6212884c411820dd2f7a 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c7bb19f7d0c69e07481baffe2b0bbdf2bd37570e9e8c98dcfb9955d92e3b827 -size 552565 +oid sha256:094ef1e0d2e9c6d82155206cbe42ab6c113eca2d0e6886ce45c85712198f00a0 +size 602257 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png index 6eb7d12a5bd80c587ffa0c7295054ee43de13fd8..1e624ca01b1576d21dcdb02525ab003b95ec9f18 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e939285862325bfd753c5a9278c2234aab976d59935a4c64aa6266e2976426a6 -size 550613 +oid sha256:7f1e90a6c6fe30d7124fb5e7385ed8f7fbe0653b75adce34939455deab4f6a20 +size 642101 diff --git a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png index b9bc4603551cce9388b49582c92d9caa03dc3a34..972f3fc347cd8251a5d5bcf20b3dbdede0f8524b 100644 --- a/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png +++ b/images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35c5d59fe0e031d56b4cb66a919b8794ee6d8990e2b739187974be3753023feb -size 554108 +oid sha256:cf80f0a67608c45671fc2c7fbaddc626f95ffed13ec01711728aff2fa26a88f5 +size 453477 diff --git a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png index eebbedf308950701b1d9e5836c1cc08a8fe23d76..c7513b04a03e82d2897378dd1d26753fa2df07f4 100644 --- a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png +++ b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42351684fcfe36a1c4e6b0f064b4a7600c1f497713f95cb579ab3524da83a277 -size 1399052 +oid sha256:59f0191a580a565d4fce8981037248322c103fc7353cb7db13200c7a1a8657a0 +size 1402398 diff --git 
a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png index 23ec866da46853788c0a5e7832a5c9f7325e04b9..d8276feb3c8cc47d0fa126b489c74b5158d05568 100644 --- a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png +++ b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ccf89b3100283de41b98fec356a68cbb8552063a96b5407ec25fe373832ce0b -size 1873863 +oid sha256:6a7314ed8b77be8eb743c64971909220d8805ef3864d79a387e4d0a1f76d5aa0 +size 1538448 diff --git a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png index 1b7e959c00b0105a8e4a7adca96d2ae9a24d60fe..9af00148d47e8e6720383c8ee3ca61f290ea4e36 100644 --- a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png +++ b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e0b7d25bd47d045b11bac85aa2081a67ff5f6682b2e7c0a85406086f84371af -size 489252 +oid sha256:fd4c8b89c42e9a21fd802447a3a44ac032294382879ebbd1e99b8e7685f2c982 +size 378875 diff --git a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png index 1313cf9d1da76f4d4d2804ee94d687df04c370e7..44d4defdef709e9aac3d12de6e22d2c23ba2d99e 100644 --- a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png +++ b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6ee99145114d4e348201e181d5f60975057d4118f38d009e21cb04a1baa19a1 -size 1272371 +oid sha256:824486e68b749efea5c50b4c0669084a5841283804300bc9b950c1e4270f29b4 +size 408099 diff --git a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png index aa45aa98224bd89c65d9ee5e3fc49f39b9ac1f04..fc26f59295b85c39b93f46ae3e6b755d1ec2333b 100644 --- a/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png +++ b/images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c80d652a62f2f74b63e4c450fc8dae4d312e0ea93b45841f15dd68d41a480f74 -size 1231151 +oid sha256:02e3b51c4945d1cbbdf33050b53ef0734360dae7c61c870e6ca9364e8e392f82 +size 492692 diff --git a/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png b/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png index 75086e340de7bc13d7f339538cfb88c7416f4cfe..4a0c68bb5793435c2a9debc2b16d4aa7f3ad2f56 100644 --- a/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png +++ b/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4c77bd5cad38d0c09cee87c3c72020a1742c2d76076da2ce203f2f0eb440d55 -size 566358 +oid sha256:9ab2ecc6aed9abee86093155fc50d3c58324d981092f5d834ef2b8e5a31bf2c8 +size 571682 diff --git 
a/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png b/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png index 0976931768fda0843faf62deeb0fa44d08f3075a..e2f1d0746517b9545074f7d52bbeabbabd6a3ce0 100644 --- a/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png +++ b/images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:556fb8731b758d28779e34da9deb3804df99690a62afe538f8ceffc24e6ed64b -size 185630 +oid sha256:3b1807ac70bfb1ea35970801ff938df2bceae810fbbcde071c06da8ade417cd9 +size 212485 diff --git a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png index 586e82e61f1ba11fe5fa503bd35a8d3476c31fa6..c0e74c927f20e014a19cad8fe1fde7b761fa2062 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ff27212019207b7fc9b220e814677b6e571a3a52820eaf3d0ce1584216d2e87 -size 944283 +oid sha256:eb4352fee6ab2d9c3607454c4f79ab50cc7fbfce16fb82da8167486abf03b686 +size 1459885 diff --git a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png index 7a0f23d3d1d6d70ad9c9a37d4af60b34d2754d18..c6974b53b40b75715c98713b13ae00b6868757fb 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52e2408c3e6f12a1eb98119cfb43c697822e862a21a7900406e61cc88fcceb6e -size 1258999 +oid sha256:e82923beaedfed3477fb381242bb1d6c0d04c5203bfb7e34310df30e3a145a47 +size 865376 diff --git a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png index c01e75e966e39bcf46e8f9830172bb135e288157..41c1543e80360042a248f0e31bbee9bf0d9fb616 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:058832c49da12308e4df9fff93852c740403f27ccc10cf6e7ab54de610ea9c95 -size 932313 +oid sha256:5ad62553d1020f27cdd7fedc48bdc33662c4591ff349668f117a27346584431b +size 1063139 diff --git a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png index b089ee1dfb1659eebe150e6ba1da01acecf56663..0b6ba50bfc34bb035c10aea3f234289c73bbf1ba 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a31618de2e072488776e05077c0d8d5f67309b4a926d9df53b061cb438a5f98 -size 1283696 +oid sha256:b11714729b4af2bb072cbd6c6c8bf49ab54a1640bc99b4b264ac16fe04c1c80b +size 1259158 diff --git 
a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png index 68879e1c68958ffb8a7b609bacf5343b176ecc9a..1ac1d26b858a67d12cc4cd324afef2c63d13af68 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27165cef9c1612c43bbfe34acb2d5ef36bc564e3f182125c8b3e1bf102ccf66b -size 1095653 +oid sha256:67833dbbb22887ddb6943725ce10d801ce84d7d8ef954c4d35f09207729a0a3e +size 844483 diff --git a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png index 33e307a1a3b23549b2a14f5fccc7be0d2c538a02..a3fdec4ca56111d168fd39a61c6e4bb76a05ccb2 100644 --- a/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png +++ b/images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e2dce92d75be9247bf9af19fc2d3238f3ea6a1001b2e9cd54d4eac574a2e8ea -size 1263153 +oid sha256:3ca50e11d56942297c58800ebacdb5bb7037171c28db340b04af1b54137aad8d +size 1262270 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png index 4a39a1301d1576b72d3abd9eab193405cdf41c36..d4c1c30acb9a8970d68f7d173c6f8e41122d6425 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98374f1ea35373593d063d270bf7b52ddc5e7c2df386d2d18e27851458953f39 -size 1553886 +oid sha256:86c8be5efb3e3fe275d370e8b2e11e2c91ae043608c3e361e04d35c88e16cc2a +size 1233066 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png index 66ffd0f6dc624d399546c0ab8ddcb99527e45812..9bc1464d105222ff2e2938661c05972c3db27ff6 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3bf35a966bb6a110c4d6b15e7bfc69c5636dbc34b90653abc4f84d90f1c48d6 -size 2177626 +oid sha256:faa89e40bde3512bcd868bc1030508503250c3ef4baeb054ee7e9a660154d8c8 +size 2246933 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png index 4d4be8ba8dfce653af28e64b5a00c80e440e9cd7..65a48f3eeed886ebd93b615b24f08114c6f6f337 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e80456658d25bd917df4665190681adb8d3999c9a174e11bee6aea82b5abdf5 -size 1691227 +oid sha256:54d0e1446d2c3bc957c27a5966e881b4a0bffc72c16821a5b05c6244503d4b7b +size 1650816 diff --git 
a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png index 8b1f588a64f3085f1de02edeaa918ffb9b6e8f5b..966861b7517a77fe1706be9c03c6f56d305b8bbb 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59135007b9acc038b1cd605e50b505c162183519173dd15d2370aace13dd0a11 -size 1551824 +oid sha256:382f9b65aa1161793dd3a57520d0c1f75d4632678df57ab62e26d49f019ce25b +size 1564952 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png index c7939106d0652157e40c82a62783e8930a397d23..d226b935fa344ab640a2dc34f2fe45a75bccffc5 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b592c92b7081cb67391350c11a5c6d25988f80733616db11c39e24aa49be3d66 -size 2315230 +oid sha256:f889b6a4347db6ff4b4ec36df412667d83e670ec3027cb9730e566217049af6e +size 2593218 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png index b1dec3fd0adef5e3b3879a02fa024d87c281bddc..2ad31dd6888e69d05902f1cf6df70dcb8fbcd1ec 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ef28ba56d4d2a9c56c77715f7742d1939a6f57a2ef3c4f7dbbf29887402a918 -size 1549535 +oid sha256:6865d308e55d341abd187a53df74392ac61920e5faffddc28b7d5b2ceca985a3 +size 1643684 diff --git a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png index 8b1f588a64f3085f1de02edeaa918ffb9b6e8f5b..a18dead3e393356212895cab62f9b89d4821de7b 100644 --- a/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png +++ b/images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59135007b9acc038b1cd605e50b505c162183519173dd15d2370aace13dd0a11 -size 1551824 +oid sha256:1e27c08036e6b8eeabf31dc363854c5a2983d3069855357cc2cb62c4b8d7dd39 +size 1378952 diff --git a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png index bdccaef460d3f3289035339439aac87877a92888..2c4d0a66719dc6d703f6117f74b2313842c4d62a 100644 --- a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png +++ b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:022f846138e54e6437c90825c339c76cf6537a693f2b6b4a68438a00462c275d -size 165846 +oid sha256:e35590719bc8d2f74cf247c95439c153c7d9e786d5ee5668dd2157fdf00ccf3e +size 181588 diff --git 
a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png index 00db2d979ec8751a8956faf9741cd1aff07ca865..58bd6ab53deccb1575e0e4dd272286e80b2caf6a 100644 --- a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png +++ b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59b3ace362f247304717f7e6f84fe078370c47f6856eb6ab1f5d19c00fa766c6 -size 646911 +oid sha256:48a975bde75211ffa9ff8c3e17020962e0c235b4bf4504ebdbfbd60962816657 +size 464295 diff --git a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png index 4e02388464b9605a6c12ff367064dd412c4ff8dc..8eb52eb00d58906dc889ef3a52d53e3ecdc2bb10 100644 --- a/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png +++ b/images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10df737722f3b969e7dc37fcfee9600609ab440e7aea724ce6f63268c5838fda -size 397990 +oid sha256:cefdf321e739c4e090c919dd0358dbe7eb635695bb85980fd54fa4d9313d9ac1 +size 475163 diff --git a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png index 3f0c67b08f85d6c7ae46a6d098632af0b94b1274..b2427a329c21580c2c097a32e24053cf6191a8fe 100644 --- a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png +++ b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9eaf53a3445f3d79b578230bfce6aafcbfbaff21502c6a2ed76adb4132471b72 -size 470751 +oid sha256:e97e099b6383461d2f3aa396ae3e528a5f9211b5e415d8c7f7770a682ba79b31 +size 1094935 diff --git a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png index 5fd38101bad3853db9c688c29bf28a0c3ba1c7c8..aff7ed6675868af80c6df9a721e94680f7400215 100644 --- a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png +++ b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:861ac2c43657012a33e830d57611cc0dedd533a91e1533c747329083e4795fdb -size 1159685 +oid sha256:cd7f8157175877767f6e28e682bf3cbe63519bc8895362089cb2561ae788f368 +size 1208591 diff --git a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png index cb509e5c8671b6a52878a2baecd1037b9bd4a536..8e2d6b3c12d3db20b02840fcabaa6d1c893fff9e 100644 --- a/images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png +++ b/images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4b2e74fa2f89a2383a6cc4d4f264592b967f8a411664c83bc8d420a5ea987b3 -size 1089209 +oid sha256:1187e19041791adfb1b20a6d6a53f9ce4f6b99e6439a0ce6aea72cfeb49487d0 +size 1711327 diff --git 
a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png index 7102a7c773f60bfd79f5fec0937768b80047845b..c7a1edd5f36f94dca39cb332d7ff6e6df0e4ca21 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75dfeced59a7d057c253fb417e5a525911414db5f9c703567dce5b5de3faf121 -size 1847616 +oid sha256:27ffec658876796b7937e26fa605131fd33a32451b7a50c07077d8b6e3f7994e +size 2139802 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png index d390ff8c868fedd0037439db6986eff5270dec0c..5c1c3ee23242a71a4c2a2d7a225789dab73295a9 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b9d9ecf16ddd244389d66471199b3140cfd7210175b8a089fc565cbca83be86 -size 831890 +oid sha256:143f17d84b75ee6285a54e634113d128bd173257189effc4b54e97f8b11b5363 +size 1181072 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png index 311846cab7a2f96f82dc2b9c4322473bf435182e..1d8d4d00ff511748900fab84066f00e4e8944e54 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7ee0980f939f44868411853919ef728291e52403bc3f00a92700d89f55dcdec -size 1415283 +oid sha256:80f891e187497bfe0ac2c02a4d62341915ddad8546a7879552843cd4c0f02c97 +size 1420130 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png index ffdb404bdd708c83566bbaaf199c67453df5867f..f7c8d486d76b9227ff44ae6888acc63d17a0fcbe 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a0ae410e39be537d888df61083539a5972ef572d46441b5b3567c3930c69ba9 -size 1781726 +oid sha256:716fb7f8fbfdf64cb5150ea8aaa7abc7c90ba01b84cc080ce0a46920d9bbbf8b +size 2115360 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png index 721d0e054573957eaf66d0067325f5660c02bcb4..5aa450455816a04b06ef240565c69121750379e9 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d179088d26373b913448714bfa99507bd5e327d519062cc1170524cce928c786 -size 1402853 +oid sha256:b6e40e3be152b5f783823e252c3bc81d5d9ea066d1dc51b2e127ed3329153bed +size 1608025 diff --git 
a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png index 0c267390bd83cb98c73d817a89ddd63abb0c9006..f924cd1f1e6bbde803ebe58da43856aedadd71e5 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c29dc84d92004b42f0bfcaedfd3571f2fa7125abac14460acbe9da9d73a069c6 -size 746756 +oid sha256:d4481eece7efc8360cdc61f2d66ebf5867f0047868c3b2ee2319ced04f068006 +size 739692 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png index 350272e7325d7feb4b6a54156f4581463e6ff31d..ed957ea67f0def925b79d63cde0608eee7be1614 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bffddf1f0e46d59f123210e0d39ad6bb625e7c46b5b5c0ed30585265cb3abac3 -size 1966125 +oid sha256:e7dd15ad973f41be4a104f2fb201087c817013f0de23fe23fff6dcdd49d3c09c +size 1273662 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png index d6e31f676335f355c2e23d8a7c8c0bb14459633b..5bbb4e1d86b1c05a08563fa16c0a6fdf319247a5 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76586b95fbbae2b1fe43cfd4cb8f8abb0cec357b9a79ea064a439bf5022a0796 -size 1813603 +oid sha256:9306ac06645257f42e3c93d9f977b04dc341b9a7290a542a49a33e91062d9da0 +size 1775843 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png index 5a0bd846e968f4abdb7bd659a3c21d90909c40aa..38ab03010b844e9d51f95abea1cd6aa8d6beda3f 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a08e286bfa14f80d04a3524bdfe0b79819bf8aef7b9cc54259bde97564817819 -size 2226874 +oid sha256:50e901cb93b489a30cb8bc17bbe84f4fa0470dee0f62f316f5b6a8e1dde0954d +size 1583135 diff --git a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png index c574cb94eae7bc242c6b80a87532b3a9f3bf7d66..522675c34494076adf8b70f2c72997ebc48d37f9 100644 --- a/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png +++ b/images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d12221a81599abef642fc497ed96aa2ffdd84d1a778281e851b27ed3bd9289b7 -size 1657600 +oid sha256:d21e49b64311754feaca33da58f1c398ea9741551de5aa864644fcea158f7372 +size 1946646 diff --git 
a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png index bba5f5fabf7a5654c9c99a4b561493aae048ebfd..7d1215f95f082fad40f554a229f3d857992927cd 100644 --- a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png +++ b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:402a107d23e425568f55eaab6ae17f637409355131de17c78568ebbbe028863f -size 1144968 +oid sha256:60bcee81f114c95171de84df22bae710f775a7bbde0af5a1db8582b97409e8ba +size 1165610 diff --git a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png index acc716a79541de8f70ae29d30028bb29d55a02ba..f8f9766f046176b5c4c592e4058fa82f73d04007 100644 --- a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png +++ b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0176448d0421e8eadbe89c64753fe9d64a0f5fb4395bf2a8b5140d48ba40b6f4 -size 1184213 +oid sha256:b0aee77387a0261a8798e5f33cd3d3d82f69d74ba50c48409eca6ece78809822 +size 1187303 diff --git a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png index 68768b238fc65d530c0723825f2c74d622760729..2a19a7a6476618b8e6dd6ec71c56b8305f2c7064 100644 --- a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png +++ b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48eee245fd244e31213c21a33baf668696a3cfc3826e0009071c0d67fe8d1d83 -size 720655 +oid sha256:bb4f01e989b20b69146e3b14f43315307c673bede52843e526b4d261506c0a5b +size 578129 diff --git a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png index e1cfab10cf47a42f6b64527820317b0aa42207b6..e3c3c0e8885e5cfe1953afcd0c3692bc07f0bbdc 100644 --- a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png +++ b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ace61a9b6cd4f589ab48fe2658cfc352098d7ca599de3dd9dfeff3ffc6635f5 -size 1583124 +oid sha256:1733aba504fe729c223b5d638b34ff68c1c96831483b971c1c0133c6f46e4a58 +size 1132850 diff --git a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png index d6672b87ba2db1450a84b4eac6d6db1cac134aeb..245c60b3d135caf4cae23a0c6ac550ac6be359e0 100644 --- a/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png +++ b/images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:367b1fbab63bfea0590671a7219b71b480b4a507fd02803b67d5db4f2fb0f4ff -size 1923738 +oid sha256:d25672e49dd2bf494dce1d264ac2b974b1b15ec0c0090577ad667af630b6164d +size 1338551 diff --git 
a/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png b/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png index 38174d7c301b336343207da341d3838fae522092..ab04dec60f8ff0110993e79d06bbf22d3a04ae27 100644 --- a/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png +++ b/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82f7fa9270ebffdc769f3ab9dafd3dfb8ae333b2565373b8d718b5c64e43535a -size 2218053 +oid sha256:39ac24dcd7cb62312ec418a74a39337e3cfda723c8e1307f6d8c188dc35ca500 +size 2204595 diff --git a/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png b/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png index 11accce7c4ac4907071e65d013eb75080935b5da..6d2b92682d616b4d8ac9380de95ab3f7738f773c 100644 --- a/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png +++ b/images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4c8e720bedea93325869c59c3218bf2cd39a0b6ceaae6f0c6fd47363af81d53 -size 2218443 +oid sha256:da00f19cea11faec225850a41cd0e68fe170ff3c98f6ef19eb3a3fa788efc834 +size 2413287 diff --git a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png index 12a2afe16eacb7ede5958ae62eb75022e41334e9..8f54ff940e191f7187ce0bef1158fd07a57f3ab3 100644 --- a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png +++ b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f76bf64dfd81622a419be663a09f2d7f82b09aadf70ce509553d96e7b3f4139 -size 493591 +oid sha256:719a51f3448c0c8b1da0faea88396bc1d717d78739b0c3a43fe606231d376cb1 +size 627396 diff --git a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png index d79064f1cc3bc85e22a829063d2b36344eba003d..64c8b51c2f12cea01fe79c284a87af2b608f9b92 100644 --- a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png +++ b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9dd14190b0134042a638ebe5f622126c4f4d077434afc9ca9729709a72d29cee -size 362085 +oid sha256:1c25f701ad915909b43873dd46019e8bc3d1e87b7c4698fe8d0326032dfb88bc +size 295547 diff --git a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png index 8330e74c4810568e8c2328bf8c56e7382c56d988..cb0aa8b55d19737aa42c4bd5643ffa4c6c57642d 100644 --- a/images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png +++ b/images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba4b6faf840dff50744a965bd129cd7a2e577c984747f800d279754f048421e0 -size 502158 +oid sha256:009f1c3826726a0d24545448278891d0c03f01eea50c8428fc269cc978f10ed4 +size 593905 diff --git 
a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png index e8f4512767b30a6e8824a1180a369fb7a078ff00..58faa23ea54311e9f1dbc1b79eb249b3182646e3 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86fabe50f60cee575c676842ec632dc6884021bb73e6b64ee0149da6597bdd73 -size 725987 +oid sha256:905f329d0d70be98989ea20100465890d7e91169852bd6800b9ec490b7d698c8 +size 870190 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png index 85fd2fd220331dee7c023ed011f7a11d22ca65d3..1857bbd80fad5c10f0532780892c0f44df235185 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d205b12b4cca10d9fea806e3a38361db7bc5bf0b801ece3c0d118266196a5ece -size 483089 +oid sha256:f32f5f0cc1a04bc7eee6f3530379736b9287cf7130e7860f8329fb895a7720dd +size 760473 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png index 77c18996a7a89c10fb9d6feb273e6084cfa96fa1..4160c323819f771aaf719741045f94b1e24f2f4f 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ed96ff44a0725a12dc5e86bdc1fede427d65b82c58dc12717b42fba94815e69 -size 974108 +oid sha256:e990cb70f8bba072d3e37bb58c86862cd3b470b6bedc062e4b53ca10ba8a3c55 +size 1103143 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png index 6793e8677f9388b4e6fda399d9c66f7ca0afd303..a43923029addcb47b5dd4552a0de4a4978c28872 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9e6d3506bdfab91144178d86702e3cd4165510d1f7f9e8697aa057a7cfb0598 -size 374764 +oid sha256:5dc605b365896f8f16e4ca34eea4b1044df3adff091492e58b75cafdf7bb8c5d +size 1823410 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png index 50c1f7e11544bf83119b22934e2cba873f6806b4..87ae00946db2bb79c4235f9b502092dd40d549eb 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5828186bf8f1d5f7a9b9f7bd017495dff39435987bd50587f82faf4b51c791c6 -size 456977 +oid sha256:c135456eb01f6fda6f890d65c07a04ecbeb921b8885d92c2ff3127ca9da028f7 +size 665877 diff --git 
a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png index 87b79c9abbe0542515ff6332143cdadedbab43c1..c07cebf38342bbc48428b1ac12207448b4e7589e 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12b3319288e2ec0e231b7d336a4bf4719dda02ed5ec7bc93b3eb182a463f82cd -size 841337 +oid sha256:3e3f1edc84b55df07e4a586d850e90cfee950d4ee28ae4fd5f2e4e9b7d69a2f9 +size 870776 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png index b3e96bec0e07b21a98d5968eed474a54800fd68e..66d23887db1a5986a8c8b9191262480594d11a77 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bb952c3848b5635025eb88dca6d2402a3a862bf768182fe9a65259888ee2fec -size 725907 +oid sha256:d3a57b2b65fc0a16f9044bd161b74a2371b55bb730c988a8248824f78e43a7c9 +size 772737 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png index 023e7ccf15d87f03ef6fc3c0dd95369075740f31..6f114d3427135c890e935858d5b08596e1ad7578 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c218fd7e23d60b646cf4a922fc3476692516316c3a6aeceba4e9535e14d5ff7 -size 559746 +oid sha256:6c122e7c042b4dd49312cc1a2bcf6e1670eb543451ad1c094fb2288883d19077 +size 525495 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png index a9bb52fa0f306f6e96350902f2d8ac116a13d460..b69cf0ecc46c89fd7561d4b352db9cafe60405c0 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cef06d85ccbb27941dd8e3c48c7a43ed896bbcb4936203e102774bcf6956257b -size 808113 +oid sha256:7b268a3b94732bcd8c3d70590030d724f1957e0154394a0aa2c23239ad7353a9 +size 886384 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png index 097eddadd84b92ad560e1396824f8492e49dde04..591d5ad436ae4b539fa27ab9f2c55311d7d065df 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb8f87da6d68227a93590f1d36e436af0ff207ec479137d2d4a2265b48ddda92 -size 719060 +oid sha256:185a6a81e8ddc1b6ca180666519f816c3a8e69752c1edcf07305c6af648a4296 +size 796098 diff --git 
a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png index 710e43fbb0ac11238d716de6059d973d0fd5b267..81bdf88350c32dad6f5fa1964b5366614de97135 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcff344589a6f8e99676a0a003189a3c4217c4156878d082791af2ab14606436 -size 369738 +oid sha256:61844bbd404ce38ca7ad3b36c7806f3dcaa2e0135ea088cda02da38ee93a7dd1 +size 1699736 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png index f5adf71b6cd022dea5b6d7b895169d4fe78c0183..4b8509c968bb85ca187b1bc466dfc1fed365290d 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6bba5b739d5a499b8ad78a239b1ead0fec09f5bdfd19fb7ef18148776e50f47 -size 667822 +oid sha256:d067c0271e6e2ec700dacbe17766019598d38a64cc4f3a417d8ab837a5e74d8b +size 834776 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png index 8eea485c3b4911b93c0e4e225ebda7a9d5c70861..09e14fdf576f74f3a79a302260a87795fcaea3d2 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6874d3ab672188933737c1f40cc8b6e82b0dc93e6ab500260516d163435dc2d3 -size 746137 +oid sha256:4c47e0477c73fec4096851a34cd67c7990a4a1815e8307dd0e4b7bfe348ea132 +size 846137 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png index fce0a4df1b748483218f138e02fd7873aff1fd0b..f1eb88e620c0be2184e01c03e7d2030538097a58 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73ef35473d3a0b8a79521477e5d23f817c941efe891b125df4b4f5ff7fc1f5d6 -size 746114 +oid sha256:5244ec289c8478043a4780e2f6ba7349fe335439d76621a4f8bbce5c2e0c7c68 +size 732195 diff --git a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png index df9a33d7e6e8acd24d907e0294ebc18fbfc8930f..2542963819f84f39583504f7c852ead618e21cd5 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e195594075b27451b1d6794d0d0aa24915fea1773f2a1027c9d600c3624ebc2 -size 745943 +oid sha256:910ae8468136d8d2194b7d18bacbb60a794247f984daf7bbde476987e7028b32 +size 870079 diff --git 
a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png index ec481b7407976f407d9842a81a22a8b79ecf8940..bb46db78cabf6d03908ac7f906c97d98ab711aa1 100644 --- a/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png +++ b/images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2373bfb9cfd96c35a55f9a917671e24cd9bc29d2e7ef3bbec10b3083a3ed9d3e -size 656267 +oid sha256:8000f2f3a43ffab6cd440b39d91299596eec98bd194a0e1fe6d2f1a46e94329e +size 809246 diff --git a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png index 33680115e3243842084ab28b3dbe782d27e0bc66..56383b12dd84ba580b437899e568e987b0d31ca1 100644 --- a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png +++ b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2420897f42c80597ab64c30a800ecef96b08977bf79c562242d100b8afaf66c7 -size 2295579 +oid sha256:d240e537e6170d2ff6b38875fdfdab8d40f035e9dab74336bd6787b7b1441e8f +size 2132062 diff --git a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png index 7091f3fd80917f47eb2359c253d370e81672e39d..8f0776da2a598c5992beea8598b520e5cf70b935 100644 --- a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png +++ b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1f941f21b74824a421edbfe2272543fdaa6e07d44e0a5fa1041c9a938a93644 -size 1217813 +oid sha256:d5bdd0c367f7594dfc12064ab7c9d2640995e3e40ad9feaa29028d967f7cf958 +size 1010681 diff --git a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png index 03a8fd4485687874698fe8359507a0f6cd7986be..3c6c0ea92d6ad8cf5c06c0196bbafe0098c2984d 100644 --- a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png +++ b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0e354facf54cd28878fe7413fd4433d0493a655e799efd86e46f8981dae2e18 -size 1577096 +oid sha256:37786e16cae3f33c8c5640e9e8e4e615fe0b6b2981d38919c1435c1e56678041 +size 1406467 diff --git a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png index da2c6774573e476f428a113830d3b97e32b67e0a..c629fe69539b9b88a51b73542591410a86276825 100644 --- a/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png +++ b/images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9d38604c5033dc3ca7604fbfe45d569c7144a2224027000875f04297c8328c6 -size 2326025 +oid sha256:26b0cd963e303ec1aab697d8e56e68c88d2ebea0d18ff03a1a8ce267232a7e25 +size 445520 diff --git 
a/images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png index 80bd3c52dce9de9faf06dedbce5cd173a200d269..be9f3d379165c47e7c27b90e6f08cfc68b7a35de 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d82bf9234abb7cca3e98858aa16624b477383869d011daedfb38583b049f2c2 -size 2602174 +oid sha256:9bade0525243de34607d93639266cf7d1ae5ec1f47018804f75745ab74ecc378 +size 1673192 diff --git a/images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png index 796ddbb98e12d75ed9dd10d7b94f85732a51f6f3..934feb95e83df38a4f032d584bf9a6956b195a2b 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07b11d06454cc9de7660aa2d59d93c333e74c7c0e949056ef0cf95500d8ae0f6 -size 3185956 +oid sha256:b6934a4619ff9066925e5b6b32211ed9e193ca1957928256e9af31703221bd3e +size 2301350 diff --git a/images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png index 514e6c83047a0826920fd4fac3736415738fb1ca..dacfb5994adfa8c0b2fd209ceddf3f99be5afe76 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb367901310ae060f83f5e42a96684399d6d12754c9a339c2491a0909dc29098 -size 300600 +oid sha256:3d35a9e8ad7056c4284ebea2d4549a1b71fc4d475395dde48a1ecd18de0792a5 +size 617636 diff --git a/images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png index e9c08b904b9a0f7ba0985ee02063cbf3820cae07..984e1f80cd384cb6eaf24c41aeccf44265f6d593 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cbc6259978d08a478fa783b194d718c6bdda647678f5f63643a51f863011e58 -size 3245427 +oid sha256:2d728d9362a2616e5ff67cf41a95306a660cc6f414ebe0a108041e42e8c63cce +size 1360087 diff --git a/images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png index af6b04a0f5ebf59da11f6c06e239a2e4754cd115..2cc5f081705589f5d212c7989f71e8fcdefa2342 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94da13e06693ef6e1d18fa323a2e104201a670a48933083b6bbb54392acc69bc -size 1879799 +oid sha256:8a3d03916bf2784f7f4328b27af99c1bc37e5b515b2dd8f7b1e2f1cb10111cfb +size 2498509 diff --git 
a/images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png b/images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png index 0aa488fe0e0af4a19ef4f7079b6505790745e3cb..0d88deec5a77c06e047324ca844de8e23b352ddf 100644 --- a/images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png +++ b/images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e48a49dd819657f6c28b699dd6cfcaca36b1d2475073cc0589d23f3dc9004141 -size 2385900 +oid sha256:6616f6a88ef547f1b88648e7d7468a514e5a4e654923c795a08a030885f470fa +size 1901102 diff --git a/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png b/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png index 6ce00da6fa8ae68ad6d41d24ac6b3509e05f1672..ee443c06465f870450eba83fe7369c3e15ed66bb 100644 --- a/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png +++ b/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd4a5c7596c31ed6757ad8e43a6aa4a361523281a015cb17d410b48c6fed264e -size 914055 +oid sha256:5916bae7c47f00986c3423a215229f6e7d1719b7667982f26b10b97a00cfddeb +size 1257730 diff --git a/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png b/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png index 399326e88863fea1fdeee54acf4a754f995478cb..3c0e3e7f93e23ba4e47e281c2100e68a3060ae81 100644 --- a/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png +++ b/images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf047ce7ab5bb71398f14be7373ffad7fea2b216f531ea5a6d04bab8a85564b5 -size 964776 +oid sha256:4999df099f298ebe476fa7c427f525285f62cc922e3dfec9969e54827d4552aa +size 431273 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png index 30f4b24a03df8f8459333acb201f3aa9b707c5c5..69945da5135c17b9ba9f312ad0755ac1d0900ce2 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:360feddb2ee7ef30a4d8eaafeb940be3c22a70c57a644a9d1db45783f0323176 -size 2179522 +oid sha256:4238498c5bf8ee28910c9adf19815405813732f9aab5fb727c6b9b50754b0672 +size 1302685 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png index 8af4a15f0da048114218ac44a2d863c47f66fbc5..5f65f010e4dffe323a78184c24d7c5945f68dc77 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6c27596ada5ea10a8b4819cf8928104fced07f1422f81e595ee09232d9fcbbf -size 2316493 +oid sha256:3e0fc550f1553e99195dda48298081bf4c90d9c357304a89e4b13b5924bd69f6 +size 2018202 diff --git 
a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png index 6038db6eb8a8a6be16c41edcae027b979e436a72..d7d2d6086e0907d4f168bf082c00157df34f989c 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ec4bf5cc7111649186dc618788593051934cf2137ae72e20504c004c52ae0c5 -size 1752507 +oid sha256:69e12d6d5a332d9f17b45b82fe4c05c124d6658bd698c01c5319e6372a428d60 +size 2110193 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png index 8ff743718881ec8f02b50c3ff08bcc0d4a52101c..f231ea309ee0e1f4f1bbb3e26ea9faf37a0b8480 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c01949271702233094f19cffba5a1ef55cf5ebf5f3c33d8b89d5b9a9294a3920 -size 1140526 +oid sha256:fa9d092f969bc224ab4cd9fa39644bb793ba07200fb85b79126c07d11cd8cd97 +size 1107922 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..a9e0ad5801e162b36ff25fa5db06ae4b62136f7b 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:e73d7ad6a3ea8a61b16d4598a9729cf7cf4acd5152f76d51e05b85df95800bb6 +size 1514578 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png index 865b0383f7f666b4605d6876e94d97155487481f..8191234c8e3056fa8aeca8f73d6cf0468c6b165f 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4e7fdd39699c6918ce724ed7ba3346a172df5c3b8e99b89b35274db3e724d55 -size 1318647 +oid sha256:5151446e1053df05cafb723ac163fb6f8b1abc3a3d7071d79d8e2f2bd4da33ff +size 1798689 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..38fc841d4eb60b8fa367d62dc9fd5243a3f6980b 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:af0dd074e43a170463bfda721c01775149e7466aefb8a7d9a809d086c494bf9e +size 1407167 diff --git 
a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png index 92b15456f81c68f7b5379369e4bcde8adb728948..ff385bed7b21ad3178df3d53ab93aed319710185 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8690c244052dd32bb559c73403ead70f8cc7911ab4c862038f4930fb1ffeb5ba -size 1163996 +oid sha256:d82218e29b081cf8d124028d19c4cc7a914de4e93b1b028072b1a4018938b88a +size 1033343 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png index 3fb070b874bd464f3fe965dace22312facb0c818..60d0fa33be45eee5d4d4b31a25a36abe8c1d7c15 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c8c58faee50ede20828b28f2b1a818f930ae4fb83afe1ebdb91fc0d6351734f -size 2182653 +oid sha256:b24819ddf84d48f4c95a363f6a9bb37cffe6d332dff449f5a8b8c5802b8f666b +size 1157887 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png index 7e57f9061758536bebeca9fbe16f4056b123e260..d81ea945d5e30d10fc1ffe12c2296324be8dab3d 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b804843a9afbaaf4f10de7a2aeaa6f04a61b6c59ecff5ad94c0bc145445c2dcf -size 2137664 +oid sha256:b5b0bed562935be5eaa07a1bd269cf8bf895b05fa0f0549eb0066fa3f70908b2 +size 1511579 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..3a9186ded75edba73b9f04c87d22fbab9b53f79a 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:fda709d8b7a36f985b0325957c99e8aec87151c9bf6e455ab60554d632cc10b8 +size 2105583 diff --git a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..d17ffbc0b364917411d1f0e1c9dc2faaaff0ff81 100644 --- a/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png +++ b/images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:d5009333c05d7f4bb825922f96f27a9884092eb0c6c439b853d8f02259c2fe19 +size 1373986 diff --git 
a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png index e3d95e1ccb515ed95be4d59f3d093cb5e8586f18..2e98d0c87e2216a5b667f6fa128b4f910387b6ef 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a5da72a9c559857ffdc14e6d7f304dbe42be1c20ccbe7fb4cf9b3c5d4b2238d -size 467951 +oid sha256:92ececaf311644a3aa1c7540c281a20829212c1e1ffaf9cd26db5c4c87d772ba +size 316665 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png index 4ad3500d87b7745602590d62630dfde794e7e87e..a1cfde34c6cff302ebd56a1d3da7078ecc70e5d7 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:698ff99bc1082e56c6084e3f9c7209f1c86e248b0cab463abb8f71e875173578 -size 1258215 +oid sha256:54aa8f4347217481c5903b23cb97690e211f6762f373909e492422cf64933a76 +size 980035 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png index 4ff735ba46228de81b4432de0c92720719fd3211..ac7955733f2a78f63aeb74037218c6e66e39a76a 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a86027d4bddedfe1dc963ea0e680bd929cf930809154f265e7ed45c6cc54750a -size 468587 +oid sha256:3011d92317ea645ffe4ce6b3b04cba0ebef8949a942f9a0f7988cc19517d31b6 +size 330853 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png index 7b2431dc649136e20b29ccff25849c57b92240c4..a317f5e647a92eedb65cabb4e7eaede40b015f44 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f48153fd525caa70b4b5153b14174dd53ee13f84c3e4ae7855d99fc8818a2def -size 467899 +oid sha256:9f028e2aeabfe3c85301795df9cada205220d5c15305aeddbb9650fe52acd859 +size 293280 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png index 38db119f3c18a272cd9912546fdfa41513eb3dea..1fef76bbffb40b15df23b8bc67ffe9718bf4d441 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2d780c237508a43b4949e65da1c6bc6857a3d1c3c3276667490b044636747df -size 513640 +oid sha256:c4261d413371945598b4aa58f3354cfc8659d116334f80beec36189328c80bde +size 512150 diff --git 
a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png index f5a67ae4d37809e990490907c692ab7e5786fd2f..edb7b7cac0c63f08d7a0463993358d44d34144a1 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:237305f04f183c84cfa68cc1553aa63bda03b42d19fa1df79f9fdc6af978698f -size 474116 +oid sha256:8f12db068d2253b53b20b99386de49e17c29c0cf3f83f4b7ab0f4911efb6fb08 +size 542709 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png index 6f95378ab4d00101d0e73c445ec9c39b012afba6..35081ba2168ee21d3220a95f4d75016bf0d564ea 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9014dc74d2ed6158e35cc8935e046f1b2ec485eba4da5818bad2a8ac1bcd3151 -size 467431 +oid sha256:4a1b812bf3a9e2251b6829d786eb24294a5b2666e98f85ac004c5e3871952fec +size 271468 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png index e3d95e1ccb515ed95be4d59f3d093cb5e8586f18..9cae8313e37e20bfd0938fac96f616d9f8d008e5 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a5da72a9c559857ffdc14e6d7f304dbe42be1c20ccbe7fb4cf9b3c5d4b2238d -size 467951 +oid sha256:4158ba7d9f727d93e133df2c4234d712c2e1ad648149584a735dbfc6fde027b1 +size 324320 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png index ccdebc67d4201842e72b39378a2f95df02fd3a37..aee0855860fb1a3a5a357a23d32784fa3d297a1f 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84fcc77aaead8148403df912f0cb00c92f532eed36b992a91cb60edd16e5d63b -size 468314 +oid sha256:3f924c43acaba3408576e7f81c088c3d8e259f9a43f1e67c99aef961086a8f3e +size 501654 diff --git a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png index fd982d7f7fce471308188271b5ae3a1a8f6d3cc7..0b3fdbab67ca5c8308343c251c5080f7fa695aac 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e7c59ba7b235757a7a2c7b42434c7dd608bbd8936e0ce17bebf5c89a6472d2d -size 467893 +oid sha256:2da93277363522ca55cb19c6f367b098af52764a883a6fbd4b6384564969b315 +size 405828 diff --git 
a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png index d32d9f4faa90d611fdd05467da499f4f5ab4ae0f..0234bd612ef07db640ad3212c97ff47e0f390984 100644 --- a/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png +++ b/images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48261b92e51ed4980930ccfe0ad9c059ff38e6c6cfec0963d42fd73d1b1ada14 -size 467624 +oid sha256:3eeb2c0617718e252aa1a7ee04e9ca37896cced3f8f1e1892ed4b98c34de6df7 +size 488067 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png index fa861c94096e7a906a1962058777a1aaf59d1937..f60a569a180119a5a4a3217d2d083445982445b0 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6740ec50b105f667b235b8e9aee87942334690d6a3f3e142193139c5632ad614 -size 796001 +oid sha256:da243258893d09bcbff4fe672f4439b2ece36276f09af5b92b3b77e08ca87467 +size 775755 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png index 2c3ed5b728a0d86f1c3566ed699c675f9b78b056..09fdd0fb71d3c8095ad62e828681d22772c26740 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3469ebbc99efe1d21666734c92a59d36e245900dfe0a4218cda5c3e062babcee -size 290187 +oid sha256:e1d93ad20aeecf75644885fd1a27c9087848029f77382029d0c81db47cba4139 +size 486718 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png index 259fb52d89ff76466537cdddaf6760e3a6304714..cef95de9bde6ab43222c1713bf75ec0eda3a2b3e 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d78ed6c1abeb22be78e5ede161223175c55dceeec4d356c115ca27762ccd8cd -size 1331759 +oid sha256:9470996e35e24d2e27d5a69159b51c419df17030502901375f343ea31f2eb12e +size 824470 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png index 25bad78058ae74e5c0b97842fafcd8e0835d56f7..77f3198cf730300454684b665644873521c49675 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33ed9c5aa5d5ed822a042aa33bc03217fddc1a57747363b84dd791c7e79c4ccb -size 311210 +oid sha256:963361576126dca9f44745bd7f527cca1f075dd20050909f5a038d227cbd99cd +size 309909 diff --git 
a/images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png index 654ba5bbd1b38e9b68c2907b6451c2cd78174b3a..54f4ab7b9be7fa72397a5f39265f490c0562cb35 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c0361aaa79d6ad6adb0bab14b10c47cc2f72dc6c7f7c06fd9db396394a6185b -size 347047 +oid sha256:967e67b599f66416b28849ac249883cc5af2027a437ea97ee3e53eb71ddd8bd4 +size 543833 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png index 34d8c51b332b36b6397b6a8f10d0a486c6b51e2b..dd286f6b00f95ed96e267f2fba84b37f10f77900 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51479a86d065f22bf4f6d583e215ba44c1c15c688f15879163840a0af778f88e -size 1271375 +oid sha256:a711efdf71098f75fff63387e5b3ea7935ee1daa5ac5d485beb90007df26c540 +size 1512641 diff --git a/images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png b/images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png index 13368f0c513cdfe4b6fbca216013ca35bc520aca..6348b246d5089c8dae6b4b50bd43aa9c52ef7426 100644 --- a/images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png +++ b/images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7e906a97fb512ca2f9619d7d156c1e02b4008fcd054cdeeba6dd80ef1f438e4 -size 291499 +oid sha256:d2db98b85e5c2f4996425b38917137f5cef26cb29a32e76b64e76cb92b7e0038 +size 488911 diff --git a/images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png b/images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png index aa919db44ed25be3e49ea3b65387ffd1d850dcfb..65af8a29eaded94d38fa954d1c54f0e9d5116aaa 100644 --- a/images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png +++ b/images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab5b7870ada7d151a627ab2ba7e6ecdf6f6365415dce3c5e15f77d40873b4858 -size 679002 +oid sha256:c8e6327f799b4cfdb6f5c9a20a7e890979927cc4dfd7fc0722991d54394256de +size 489475 diff --git a/images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png b/images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png index f441a3234093a36a64603ea37f44fdb0d40a492f..2ffe05f7c770105237f961c62c92f5ec5fadc493 100644 --- a/images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png +++ b/images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fead3d0919df7539cbbee5231db8f78f23a6c0010d10d2197bd67bb7c43f000 -size 613951 +oid sha256:4d0c45b74a9753138e13e94c3c9a5d9aeb53980f31623b3ad627f236ca840de3 +size 906465 diff --git 
a/images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png b/images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png index 8dcc4551976ed9abb322bb7eac5008dbf7e84bc8..86016c1086cb165c84d257690a4106c3daea6dff 100644 --- a/images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png +++ b/images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:370e8656b0c77243a3c73c60d4805f5a1db2143ca339039ce62ed477ff098587 -size 2095739 +oid sha256:f4ac5e29d0d16a8e79bb3edb7f8d6d65b551d4f8479a97768b5d6d5b617803f5 +size 1078448 diff --git a/images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png b/images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png index aca171aa4f3b48fba5c43593c81d6996fef38a82..20375aadae2fbe79cb1aa2f4477c3cb8ad6daa72 100644 --- a/images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png +++ b/images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7d4b261b1a006656f44a37567e44968c3cb5e20f821fee83d8eb38561d556d2 -size 1989383 +oid sha256:499af3f0c0f88cdd650e1c69fe626ed732c3a59dbf362a0abe0f7d2efe6d4b7b +size 987783 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png index 7b701f72a4bc0a66eda4b7d100ef7cccf02577c2..eae6f1f6373a2c5cbc301c09353698bb15268433 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dad2bcd358a471062fa79d233468ae292b9dc5f221da1d2f8c9d043ea736efa4 -size 1310247 +oid sha256:c4abf732320813d22566fad6b1dd95be4b9bdce8ac61526f6fb9f27d319bc3dd +size 1265148 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png index 9c62bf7d23c64485361e5e05258f4404cdd2f4ca..0fef82a6db748b894f32fa51d4ee5e56bd44b32c 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d689ac6d9c5516f738204505312ef41866c0fc0f2dbc46f87e4c997b35d2d4b -size 1798962 +oid sha256:5d7b229533cbd258a79b09ece84e121aeda0db78726a9b8515b524bb5ab1a9d1 +size 1664497 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png index 379801b0932c2b9f185b3f7b4eb15d0e3c562a35..0f4a02f24c14d374c0fa176775ee36ec3d19d49d 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95a5cae89a62c7d4e749f3b9e919ce8bf79a4f6289cd988abf10e97a2efea19a -size 2596388 +oid sha256:b6727cdc872b5693a1e05d38d781dee3cf5add46caa005d6cca98fc71f1d1c41 +size 1107018 diff --git 
a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png index 303677f32b94f368b94c846d6ea75a324660a088..6c2e357e5e1d15a88c5d6437002bb10eec7d434c 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4559e8608d998e162eb3e2e73b8a06686b7c79256fd383c0b67308cbcc25637c -size 1933233 +oid sha256:2083e88b6d99e09ac3ad6b12b6bf0222de1874e9cdb39588c2d09fd4b8fad752 +size 1371474 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png index fda10e4ebc123b70d439aa1516c1c43026ece630..99ffad7530c5a72933dc3e9070480d9fdba26bab 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13ac051b72bec4352a1bba956fcf553251de3848b7450490cf304d54b076fcbd -size 2898302 +oid sha256:1c0ef6fd295948dd74e551f8519d477a6101bac6ffd0e2ba5a09e04fd670d05d +size 1462284 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png index f3b426fb11abd1c42563a27b45aed459439c9c9d..8b7ae52f975828564c8d36e5fa9045b0cf5467f2 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:168e05be8dc181d7058a41fe9a92ffffec0b8ccc8c017bf3f4ed272434ee87c5 -size 1048624 +oid sha256:0ceb3c6eda200604fadfc879c5156d400eadf5086168e80e0ccf472a4b69268b +size 1104473 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png index 10db0fd11d984257ea0ede66e4e38442f12e7300..f576c9b8f70e99e29e7f78612663fc423d8847a6 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c19365b2a837e5200315b1e84783deae7f8638f749bedfd0e4f46c38ee60998 -size 1899896 +oid sha256:e205de11539fb9dea4867f24525a3d33844dde3c8c05dffb265477d525efe73e +size 1986615 diff --git a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png index faf7c6e80faaaa006cc2bac9910beca556b8da5c..78acca04b337210295af981bee59891e93007525 100644 --- a/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png +++ b/images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acfedb7577b4d54723d1d2fe03619e214b026517bf1fb88ab3f0b5093f846fd2 -size 1504134 +oid sha256:e2eae4a3d3ebfa3c7a2b04fb36f28878eec5fb929e9f5f21ee0ad1b7ab08c3bd +size 1887940 diff --git 
a/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png b/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png index 5f72e4105b2640acaf20b85d3eedbc201e5d7114..64ed18b98b03445ab192b18c42f3bb1eb32b8903 100644 --- a/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png +++ b/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e3ee274409c5eb087bf553b63a7c29123ef21b73f49b9141741a57889ad1792 -size 1162727 +oid sha256:14981a32b55d85ff4c6246b1362ef24e1f059041414d7319a23800d0fe789f45 +size 1005062 diff --git a/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png b/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png index 1de494c0f0e30c0f6dba4a5b24716529b86c089d..7bf0f6ecef15e4aa28e84f12f6722fd4758a7be4 100644 --- a/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png +++ b/images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27fff62b860825b5c0b6a91903240a6978e0b2762dbe021cec223ca9f4edf1b6 -size 756972 +oid sha256:25eb303b82a058af85be5d86eedd2da7ed27793242a81974890cde60397734f5 +size 814842 diff --git a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png index 6c7f82bc2f31c9eeed14c619a827daf8a5c3124b..8a9f0cebf14f03a2e26f4dc2c70b1726ed634470 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5674de9893ccce4e8cf706f72cb44030c8470e2d802c900935a342c2f89c1476 -size 707316 +oid sha256:0b9dea4de4c586fc8b2b7ac5c19acedaa6c0259595c515c499848ea6fb896262 +size 558712 diff --git a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png index a518df4bd82a243ff7872091062c978d94d63a8a..4df388afc339d72a842f66e0abf76f273586fa94 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97cb00873b7502896d39d1b221eac5dc85a10391e9d8f41ebc60ba96052da8dd -size 785542 +oid sha256:e211ace1625c43240d2cff701a39367429a9b6ebc055928c601df462f57bff6c +size 929986 diff --git a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png index b68c4954b6f8611d829a010d440d6b16574689a5..011262b1c99c79ccda687b3fdcb131e4a061f9d3 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c80ee6966ba059a9eb011245e074e50b4287e9c0b04d2c7a4b8e45558cc63f5f -size 1507299 +oid sha256:d4346245ec10da3fc493a52266b240ad235624b8524ada176762d95414fd9cf6 +size 1086216 diff --git 
a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png index ad840a3a932edb7285d86dd3b33fa7f20105706b..5d1d6b6cea41e47288c929b9f0a1723a6d4c8252 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19d511f7faf6b977b40360db4d139db17bd583622c62fe12a6c45f1c3d02bf4c -size 798637 +oid sha256:bd5a8cf965d20f198a1303c4f7f26f1e5033e46cc27bf2866cd0724a9e7fc41b +size 999672 diff --git a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png index 2207a4e1b18fedf7d715d3544d82e41e468b3f44..346b8268398cd9e6a7c7be8fe12a004f1e28ea7b 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:020bba399fafe3c3d61b043f4194c341d36ff05ccf72932bd256af479ba1b333 -size 992135 +oid sha256:108b779d0f3c2c140163f96deea637a62308f2f40868782c11d15259f1dce206 +size 996817 diff --git a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png index e6a5a2b7d9e652fbe18be19da2ced0deea4405f3..af8c520519a2e925353a95cc5d60020bf55ea011 100644 --- a/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png +++ b/images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb5788c80d545230229918b54dbc3b899480da8d014bdcf986699a325bac2501 -size 800361 +oid sha256:bc4f63c0e5b82ebc7fcd662268cb5cd57e1b92704b0ed8daf12c3cdc19113870 +size 992386 diff --git a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png index 5ab9e59ffe40685371a3baf8779602d08805cc4d..0a7d9d786c5c35368dabcd81951e02f8694667e0 100644 --- a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png +++ b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0feb9d2a9d51b3243f00c28409d8d6d3a36cd5409086d8b6aa6ff540e61a33bf -size 683860 +oid sha256:393e0f940e1f7774af65c7c4ac584a364b5567756c09eb40cf2be63522dd1171 +size 745998 diff --git a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png index b46a21e154eb822933b1b9e517cec074a52f6bfa..1cd78f49875ab34882964dc4b155dc260f1a07b1 100644 --- a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png +++ b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f07c6096e5e0cf5e561941a8f11d8925e37f897fe3e21ac30f1507cdce84f9a -size 540140 +oid sha256:ba27a3cbf4c051a540fa181518a8a394bc20481026a10242be6ebd911b3bc8e9 +size 690310 diff --git 
a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png index e08e94ffcd67673522ae39c7fa78919c8edea0d9..36826f2f82de56d19508808fdb154d00796bfab6 100644 --- a/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png +++ b/images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:812bbd0efa34341a64dd49c07c793d824ec688608ee40cc7f7b75940ce93f77e -size 986856 +oid sha256:2faf4cd5ac25ce37136ab675be12c28300e9f519d0c896c83323b6cefb7b667f +size 1059654 diff --git a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png index 35a9e3cb5beb13a20e973dfaaf80f171feb8081c..34ae437638d83ac69216f4d677e22e3d5d7b6bb7 100644 --- a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png +++ b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a33e4f930e2cc4296a7e21ca6031056a9776b9a035454b4f1209ceb5879b5a68 -size 206664 +oid sha256:270d0f0660a7642ba887f144b89bfa6982065da324c96490a5fa28f3791b2203 +size 445525 diff --git a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png index 4b69285e1ae453991bdbadcf0832a3e9c5cb6cca..3ec69db8a689210ece5917c5d9dc221c43cb697b 100644 --- a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png +++ b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd671e9b000ecc46872afa7a47b3d8b145ce9cbf2b9cec4924290690f0eece9b -size 397461 +oid sha256:91b944aacdb9d6da5b9c5e01850ede2b26221f564ca48baedb73c97de338a842 +size 508689 diff --git a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png index 8e19fbe67ef8ca7dbb512b9232668358c7593b10..0a490922dfae26d083a7b8fce510e755d759bed3 100644 --- a/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png +++ b/images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:005b2228877abeb4228239c3676f96cdb0c9f637c7ce73aa28f9071c21678b7d -size 1111152 +oid sha256:9f86fc1b5572f115ce39aa17b72c484250c5c359207cf8fa2d160a3570945cb7 +size 1297328 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png index 1e8e063a8b4c4da676b70ab74104653abe9960b9..690403f0d3e3ec724d7aa52a048d3e17e4a3aaf2 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de545f115ef19b7efbc53d309e973d8d6ce84e413b2a16ae3539479678de0ad9 -size 709414 +oid sha256:0fd69e6779f02eecaddd7506770d1c3bcaecb2b85bb9c3c17a4bc6bd82e21b32 +size 1135368 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png index f21e31a7620576a5e3876fc1b3dde56f5d9ebd20..1f8fb368d2076a5a1ae87405dbc867016d8312ae 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20c22c60c11ce77177f2bfecefcf9cf34de954a079b3287f9f5fabf4e8d66158 -size 510433 +oid sha256:c01e921853985ff028a0c302289c465d0502c17604881bf5acac93cd27834de3 +size 607959 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png index a800d9395fd3c2f88fc83f2724c0339756de846a..d09eed08fad6f01ce8a23b1d86a499764a7b3cef 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d648ba5585ec7bcddfd69fcd2318fd54fad7aa8d9973629424fc40b48041fbae -size 710239 +oid sha256:79b56d6db493725ee173bf64c78e8e7ef68d5b381f55a0a5a673ef1ff7cfb6a7 +size 715419 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png index c313a36cfd36ac546b9831e29cd65cd1e8ddd18e..33a43a62105a9c2345db4697cd7b44fef9a9318e 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9322c43a5b4d0ca9bb52d5de3d8a40523c1df9ec8e00ce5b5c21af33af6e1993 -size 1472291 +oid sha256:7977ba3069dc9fde24757f42063d2ba4b38d11d1660cd2a7a78e4cecc8a0653f +size 2033371 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png index a6d2df0d006772e90b19dd529b7f0a08b706cae9..f37cdb766c8fb866887a78644402d9a132b44ad4 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2251e1d475e0d623b40f0f3f80911a3e90208c015b6550399ebe0fc3d6a8ae3b -size 629604 +oid sha256:82f05700217b480c7c2a7194f99cc6baaf505a4604b7233da492d91ce1666011 +size 983180 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png index 805be1fce049ac14c375f90e2511d618f6316346..4fd1d5680d7a9429c5369c8b38de8a2e72f2a09c 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8503195fb57ce85c02fd4d67ed7c02cf40d08f54f09c477c7d04d7cad90f8117 -size 565380 +oid sha256:c6580a24bde354ecf9ea5cbb04819fb37cc7e29ad705de2e84ce02b6bf25318d +size 1045502 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png index 76486f9a305ce6c81a1d6541295c1342aea2f97f..06a9006012775a735865489833ae8e630fc566a6 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6e7c48326e8e5332271fa74d0d9e945db2d8655ec1714738e84dde8dfc67b56 -size 595490 +oid sha256:8977e0b89b34b239a17c1bdef7ea2c209fb93a302fa5ceafe66002a0669b4004 +size 1472969 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png index b5b97c9b8701497980002e499a013c8e3af0d3c1..35b41a4c0c0714052ebf1d48df35a5bacd7fb27d 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29a87291be4fc94eded007dfb1b9a07bf246aaecf490d5318b45fd1c7a867d69 -size 2726715 +oid sha256:29b97f90ca2d739f0343e625773c52e49aaa5caeded66990715c976b7aee1222 +size 1061234 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png index c80da0d902c8479ae0657d3c1a45b723abdba0f9..e74506cc2f78e00ecb576705d359dea50f1cd5a3 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf3a89469f6d30d654b6e9b21eb55921fdb349cc420841a1a15ed4219010b704 -size 716267 +oid sha256:b8726f8ed01e0a6d1fcf70145c11edb25d5bc4c89848fe6c1f765c2bc735a940 +size 974160 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png index 9fc047d776d5cde3443a65fd4fdb0b0fb23cf6c6..ec780955fe0c7545e5e31f153782a664a342be56 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4d352fc927109246a0c0867d25fb7cf0fde780369502972d7ecbba8e331c10e -size 630293 +oid sha256:eb4b98db99275711d120a3c7f18fe571ddb03c7e45a4a49952fd46d96884deeb +size 1320475 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png index d6ebe3b3aebbe48187074d824bc1f597c249334e..ea34525e19f0997476aa5f135c4a06b0127ba003 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40d531f5639f1e24e89ce9b2a0e61dec6e47bcfdea6574ef73e348ed6dfd4aaa -size 513118 +oid sha256:cf3150181dc63173db1d306cfe17df8c0d6d31afb39fec867720d4fa6af1eb2c +size 1085310 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png index deadc3052c217e89445b5230000dec7cb0cc76f2..ce2755dea5f170e80901bfee82efb669c25953e4 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a14f671f2f9f8de90990bfdf3308e89037f73135acd778e50ed88de18da9cb91 -size 507905 +oid sha256:df51e55d553c7316d7f77cbbce8ee93d65b51a524a30ed6becaa5fde426500cb +size 593703 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png index 7c470375f79c03bb4ade46e9ebc53652e19cf9d5..ce4cb8047ac3a32542603360c9b48ed194076ea9 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c211cf75d3232c4f0ae07e77db3ab209a85623cea07d86132397befa8291827f -size 723430 +oid sha256:889fadd1b806694a5cbbc72f3f7c90e42bb59730f9127709266c0f6829491d70 +size 1147275 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png index 5828776a711d2b0bbe13de3eb304603a99d2f895..4a03e654006582dab2cf285ed1ce0d99585bc563 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06cb700578b6d536d7f9b121cd36ee749c5f5bd25db90830de630e2b6b673972 -size 537512 +oid sha256:7532d5297ed5b1e976a6d07a82a06cc992f1c04030153a3a8b8816fceeae063a +size 798963 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png index 9e53c0519e696cce6bbcffd01a79eff04652ac2d..e77bf33d7fc35a4edb50668c35557bfdc3268bc3 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80ed58f93e0efb46dafafe28e96a57593c6ce97d83e2de6e80df031befd301dc -size 3152349 +oid sha256:04a021115c31bc3708dae1736d620fbbc132490dc666eec275a8051566bdda0d +size 1874815 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png index 8c77ace5c20613babe02fc5ecea3b0099e1242f1..3cb95ef979bc9ac5d980bc4869c277d2da1bfb5e 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93981c039e42b152207ecc2d3cf4655982f0edae135e1b54470aafd59ec0d9af -size 1164957 +oid sha256:9fc026d0b9f25931ad890a600a6a2c062331e143ca05b01958768155beee5ec5 +size 1925549 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png index b833af87220af12814803dd3f2ce0a8a1618a441..1f8e5250d611574949b3d8b05206727919088630 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ed4d7dfeddb11af1be0d3e68072d39455dc9d5fe1686e165ab0dc79f1b1c14a -size 3300489 +oid sha256:4c51ccd4744f23911a43cfe419a59487d3bcf94afd5a07099a1a53a0ed7e6db0 +size 1973283 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png index 1c90cd8d86df814ed3d7a89d5172132dee7bf535..aac85c232c5a7f8f13a3e0d094af732294ba2087 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b1f533a3eca709b93c346736a66ae1ad2968e404699f32933873a930334823c -size 531948 +oid sha256:71fa6d2e0ec16827397c4b269ebecd4c0c4bc97fff2235ac3bd578102ad7390d +size 625107 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png index d34a1ede42c89a72ef76f19ee73dda7ec0747c38..0863c051b255a725df27415f67b132699889008e 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45aba7256f773225e778d7bdb05252c2592cd2de6b4678b27159a8093712efde -size 630257 +oid sha256:f10ef5230e1c5fe347ab1e0f372b2905c4752a29283c758d6ef5911c0f23545c +size 693437 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png index a2d1845cbf3d026481b48c8bab973f794fc76760..029817134d0d14cdf26f8dd8d5bd9b182732b3ee 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fb87f671b163d173ba99d2b27e67dd8caccce6ea7f4bd6f369e2155fdaf7c0a -size 512910 +oid sha256:985bf8416f45f304a59d2b02240781e5cecc14bbb389fbfcb5562fb5ad0fbb4d +size 963627 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png index 38e4ac609f2e290358a5005294c390adf293f048..5ad6b50a3515aaee5a9a3add2c1f90b2422b599d 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1901e8f16282959f3964a61086d4ab9f10e58f5cae98546afc242bf0a897d226 -size 522833 +oid sha256:0f88908adfb18d71c33a1cd7ca6ebac45435cfb69f34132853f2d0720e1e3192 +size 786508 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png index 8aa3c50674dbdc45e3c6233266134e05c24b4949..3b732e146de891f2a8fe6e0aba13d1425a310acd 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9885e78f40df87db83fce90abba51d49bd069513da0eb6a6a0cedb1d6f72fa3 -size 630129 +oid sha256:67183412bbd8da2b02f4ad63b7c8533960cee7efe63aa45c67d6866f8b1589f3 +size 687917 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png index 34c3eed897b6304037c15c6fb0f81cb70310d790..3f497d483622a2cdc99946ae9ab195365fe372ce 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a64355045a2bff8b42b641dca33dde53b3c3e97e57fffa2e81e9c7373fc96068 -size 910249 +oid sha256:22bead93cab9b4867c4b8f74948e6bfa32d0d113dc45e791d6476a11a25d6057 +size 557565 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png index f06864f88bdba16a21d918b4c89fed548880c8a6..aafa746c8f4b8204df0231e2670a1edded74e202 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:019a6fac1e86b5c49a2fe539b51ac0fd47adcde9dce5e5003f2ab63c31af6e7f -size 724548 +oid sha256:cafd248a9443cb1429d8f4473d5d9e086fdeff0e056ac660a669da4262281ba5 +size 743201 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png index 43c83d11bcc750d244d145c66f56c252ec32e835..0d32aebe45b9cc2a0860b6fcb7a620ad90812f7d 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad1de6295c064543c5db13d78e1ecef20c76e5b99504378238d650c80c712559 -size 716918 +oid sha256:9846d2ea56ad3611330287e86975a43990765a78e2e117eb371a19d72291f8dc +size 1500716 diff --git a/images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png index 6ecdd9130fbc6988df1a133151cca9d80bd15d32..6b1e14d45a9ffe5eef12f897082fa12f15d92695 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:785d1824a48398bb7e89ac5c3b507e384b34612cd31c1bde71f5fa9ad5202830 -size 2271048 +oid sha256:cef7562b456cd1e5164f01a8f899e7de89937feeaa1693b034563b03972aa47e +size 828912 diff --git 
a/images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png b/images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png index 9140433ef0ad9c65f58f1d0dce0a55a2b7bb9959..2a1c13e0b03f665f5eb0c8dfa0d5e49dde6f9f1f 100644 --- a/images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png +++ b/images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98cfebeca2bbe2657e449645bf651da0a6351ee0e0b9ccdc02313842fe8f8d3b -size 709535 +oid sha256:987424da504c3dddb531d49238f90b375012c6e58d21f89c84f67c65bd132a41 +size 1147092 diff --git a/images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png b/images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png index bb9bae74dacf0d85e83303b733f57078c8fcab3b..dfa1433ad034d1cf06614d6dcf35a8e42bfc5a57 100644 --- a/images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png +++ b/images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1e8f92cba04d51b0edcaaf0031ef4ab06ae82143839a889cc7dc7dc9e7a784d -size 1265090 +oid sha256:d29351547f9dbf61b00c9ddbef703cfd27fbc8da16fe51c0295ff322083a2a36 +size 950840 diff --git a/images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png b/images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png index e61ea0cfba373fdf5067a2ec29a8503bd3cf84c5..c3c6f1f5614336d902905da386e726e143be5068 100644 --- a/images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png +++ b/images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd479eae5c060cba1880dcc39ea69a978ac58a3f072fdaae199e7c850eec60c8 -size 1434920 +oid sha256:0bd0e1d8b5e180c0287bdb61c83d8fa0e73bcd60655fa1489394b566c4a6da58 +size 1278063 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png index 95fff5f31740d399cf819cc7a5084fe1167260cf..c26f5f885c1e2cadb6e8aa05297a8238ac186d5e 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c844b301d39c6bbec8afa58d1858a24edc3bcbd8336c3cae02b64c59269316ac -size 971943 +oid sha256:71a3b8b4688faaa99af779576db0f5ca6932752f81e71f51123fc9c6dd8a44c0 +size 989443 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png index b60e32b72f184a91ac541d61346ff694e9db0c2c..c2e9b26f7efe70a82a543d57b0049e86926126f6 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56446f633984c4c04d942e24b8823dc59f00e0e1d0f489ba10f967a815f7887c -size 970707 +oid sha256:43cbb2e5ccd414b90d2f0eed39a016d3be381699a517d2e6804a1a4b230487ea +size 1248068 diff --git 
a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png index 7b87a411b766152f57589a40fdbfd14a24a4f317..ac131fd6416b07aa5d734f7919735ddfbaf7c3c1 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd85bffef0b8ecbf248e7ef64f2a1d6577e3f6e5efb38eb37afebdb891cd0ee7 -size 1011704 +oid sha256:638bbe776bb921b62c07e4e5aa22db96f10d0312005d808511bece475da48a24 +size 986331 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png index ee5f31f10741763a43d6490adcf0aa090f35d696..2b104cec09e5f81bf56a9dfa76413730c5fdcf47 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e9ef65c9b8f2831c7c34999c836d31b022f63a571f57a671067529685869f2c -size 893819 +oid sha256:3ee567ec272fa58d9fd22460a974356f551431e169dfd7476cb18ed8c451fb2e +size 1077022 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png index 482615e8b54141b14137ebf8dc02c99ddad1f180..236825b0283402015f91a82c719c1b542b35f858 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb0344dc1160da7aefc52dfd3169c05576cb49e1455eb2c1eac0a6d02e1759a0 -size 1514837 +oid sha256:1b5ef47b8e32a0f0e201ebaa560f9cf7bcae38476f765731ae368af8701f5558 +size 1662646 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png index 20148f3fc72d847901545385c407790f0e2fca29..a48bc9d6c951d600b47738d14180fa5f971c02bc 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6e4c596adb5fa8521437caf72e2dc52a097c80828a3bfdf2e32f2808940a0f7 -size 1000444 +oid sha256:636264a41bdc14d401a8d8e6c77802ae2f3f6362b17174ec0bec42c2c279ef44 +size 972063 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png index 6e4604c6c3ce4def45be9feced4f687f48815550..42f9db1c3876c7f2f4c296bd2bcb5fa27b0aee8b 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c064f6d90a966cb8e1e29ccf4d4b6121f77baa03a7359aed0ae60a14aaaab2b -size 1435367 +oid sha256:73d2500596e650d3700ae8af8b13aba9948382a2a7b3cc4c8d9a664e60e30d20 +size 1301672 diff --git 
a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png index bf3fc74cb34310ae168d57567a23250dc4e69a8c..12fbe6778e038819cb46f9a035aa39fc4641c834 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b012b0b6a5cf9e4e154eddf12b91379c9bc8222b978d154977e41f509dc55a54 -size 1000070 +oid sha256:a4903994b0a30e6b9ef6ea796b674dac9ae54dbac7fcfa1f930f4a9de637a055 +size 1245695 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png index e0cfd96103c28a9040fe75a9516ae83c2f937d8a..353b8aa1ff42da8fbe517b6ade32a1a0f4ce073c 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:352bfeec44df2c26af39b83fe9833b66c24484ab01172e09daef46875382a332 -size 1445935 +oid sha256:493cb3e23dbc559ae1a885fad0ac28e21199211006ebc169d494253caf5844f5 +size 1474653 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png index 662bc7111ddd19edeb74bbf71b072f5a6c547048..89eb00d7f9cf5f5aa47b592b3a04b8a11c88f1c3 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef6645d9b9eeb4d5afa8482728a962e325eefe7c985c4576c1467a7bb50b91f4 -size 962408 +oid sha256:4b63f534c622192e7919fe78b978d7344a4d83a52c86771083983d90d8917290 +size 1189910 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png index b7322ad44f11bcaa404ce861674b775892bdeff2..ca50aa049dfde67b086c4af300464ba48c054866 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b109bb7bf2eb1ca7130361d15e5ce74e96f8d8bd0d282a19755db1149275c99 -size 859364 +oid sha256:a07d4a90fc525d210a1be5fa2b3f406e94f50cd75f8e0b386397ffa872921314 +size 1067139 diff --git a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png index 90d921fde2fa5c6af386bce744a6b90f7312f9ed..62dace64c720d51765e8e89c94b66192c1d942f4 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b09f5e76f1bdec9b2bf55e4a1c8440efbd120fb352a54acf3a2cccc1168e4fb3 -size 882829 +oid sha256:84f9c9a62ad7f6170212a4942e1d9cee057cf4584f50881128ccf0a4958b22ea +size 845551 diff --git 
a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png index 34dc7521ca3cba088ee4c88dd8b157f469dafe0a..3c4efc196d10c79fdfe63283a927754fc833a0fc 100644 --- a/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png +++ b/images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1318afcc8221c5c915d355bc697ff13a9ef33b9ea690eef2d7bbaea31c753249 -size 1010162 +oid sha256:9c23935531207ee9bf0220681d35dfc1686e2346b2902d9fde0b7a31b3acfa9b +size 962847 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png index 46beb0565f6e804b48d3530f6dcc50f30d86f742..6abf1cc7047b6987409d427aeb1ab927b895879d 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1504a499ad21d8a133a34846dadf7a832cac54baaf962bb185355c8a71fd3206 -size 2556402 +oid sha256:d2760ef5a39eaa1991362d245af7e02274a03f9ae4e2226c82c0e7591c3935e3 +size 1500232 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png index ca310831fac938d97f7464fa01b7146af394c99c..55fd6bdb9d18661f80a455918f2c479b7b4ee66c 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe56e4a9d1169ca697b6f691193735b1fb7c3ddab601424b2e48c7ba0e6fb2a4 -size 3071663 +oid sha256:d5b47e52ad3d705f911b3df96f2026f1efb306b5a639e38aa9ad598a7242615c +size 1176326 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png index cd195e88356aa195f2cf8f304644c0fd723d55e4..6b7354c2cca279d0320f9365ad46dac45c417deb 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba91ab6533988fcca854c568a2db7f11847ed0b226827f3bbd126804b5c67497 -size 800491 +oid sha256:5309c98515400331780ed63f16bff25c303b06a7cca99aa4243f85c9a2e447be +size 879856 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png index e4d7c3f9d5f420536ba4bfd51295ca3cecacf85e..63fe658df6d0826fe956d03af11a5caf5407b40b 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ef4b7b5771546d0d7ce916b29ea699ef32b958c78298cdf814909f77b6040ef -size 798726 +oid sha256:4c922c74ef5808119144b19f66ea10a292d69c444dc4f99f8bfb2a21e3cda92f +size 637701 diff --git 
a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png index b1069998900c64bcb4b7639a772551b8620b107f..8c18ac20936f7372f32797b5589e943648e4ffad 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:875f20e6091e1300bd0aba78b86d80742f8d2930de9a7eca23473f1b82b1b187 -size 1461236 +oid sha256:828e5c7ef851fe0f8300f9de800734a92ad916e350f3eb05ea27b70f2d9f6381 +size 1223576 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png index 3891d38044e3346ac47eef45daee6181c3bb3fa0..c1c5d43b51a07aad48b5ca3f096dad543f354b92 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a6d080ddc9d2cfd797ccd7cffb1bb0711ccba117f38cd82cb95b5d0eca2f36a -size 1917421 +oid sha256:0e8bdae3e44ee27f6f0b771727f7f2e29f7d2c402ae32b98b21834dacc960f45 +size 807452 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png index 61712a32f88969d1302b737ae8f14c35575b8640..37dc0f81bf0f9e9f8ec44177ac57fabbfc4636fb 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2343b07353fc705007855175ed462d114bf9303bcdd06ca9281aa9a036a0620 -size 785673 +oid sha256:8c93b1c7938bea4a478cef70c1f8b2f6e4d96f09ad7079007bf858e57f2f0c6e +size 1037649 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png index 5a7a70392fbbcd34deae1c2959f7be94379bee12..76458e7fc39bb7bf1821b4577ae13d55f7b7a97e 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7f9509276247064ced80940cb93e78c382edf22aaf1e127f125a51d790e832e -size 992022 +oid sha256:1c8feadc4a664d657cd430519627685ae8576e39a96bdfb8a95180abfbd841a5 +size 981902 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png index 8c4c615ff2a17d38d73356eef57102516d11222e..b7e07ff6406c4422d6bdd2c04808567a58faa617 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f9305f9caf726856adcf64a6e7c1801fc3394fd928b6fc856296caa72a024c7 -size 992117 +oid sha256:97a7956c226e6ccb8e4d3992a2c4a38aacaacc541d2ccdf5e545cad9a17bbdb3 +size 475938 diff --git 
a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png index 5bb2bd7b3b02d11a0ddbb44ed5ac446e3894e506..37c68898f0f079b20366ce96b615919376150e43 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31d4f65c85b6df771f798bedae17058de566760d4d951a35c00a30d1fff81540 -size 3245076 +oid sha256:15e1eff84202b00daecb834e71ccab7b61630dc99393758b5071157b06854298 +size 681423 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png index 5db9c201028d5dd936075528563c7786efd38af7..5a7f3f14b5ef296ccd4e97fe9b1b3597d75b714c 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06e72e5b113d1f094be28b8ed6ac52410ce100ee2bdb769180360e25d6165600 -size 889094 +oid sha256:e77de22884ca51454dde416fee118881f37ded28ee6e1f1d8fa989caf502082a +size 797823 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png index 273f0559453acf33db13472d17b32e86fba04fbc..ce40fb4e9ebcffce79370b10d6ba5d8911eb72c9 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e63725c387828ee5b76301f47659f4f0bdde8f1bc2f5b9fd4e1c2fa92372f614 -size 1755611 +oid sha256:c4b1cb9b34b33d9b49f2974d3f72918066bede579fb09ee70998a53e8d83c4f6 +size 721146 diff --git a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png index ccfac9c7e582959acb17991e25f1e07ceb37d190..62a665c4b576f549e38b85832d61742a00858978 100644 --- a/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png +++ b/images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d31bcff5f6142516818e43bdefd4d8198b2f04fa65ca1e64fefd1eb7372cdfc -size 2779056 +oid sha256:5afdcab7ae9343a481ed9bd7ef306d68c84abdef071076a21efca2b92d055b29 +size 1081202 diff --git a/images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png b/images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png index 498fb26c44c4147ec3d6464deba220e351eb3ce3..c90fce9d917a8b5583ad1dfe304bd72b759b24a2 100644 --- a/images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png +++ b/images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a134a6628f27f74b8b0477d215e1f62217a0ae76b7f16916569c676b125921f -size 1583032 +oid sha256:81256e16261e33f216381447c3554846c784b7206707e49440f4c26d1fc3ed2f +size 1883812 diff --git 
a/images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png b/images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png index d92a250123fcf5cecaf163536014c5fa7fcfb2e7..3b1e3b79b4e5a8962926bf957f1c07a6d3e4a2ff 100644 --- a/images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png +++ b/images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1efca96f5e69a9697b7fba839ba77e8d1ef52c973087ecbaa26ffc4800c3ba5c -size 1942216 +oid sha256:b27d52268a4d04aef9ce766b55d1a4821a0e51f7a3d895827442fe984f13ccdf +size 1597762 diff --git a/images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png b/images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png index 779962461bdef57843f2cb40f087e91b41baa322..b31834795b53c97318cf8ac197cb5e3ea104e63b 100644 --- a/images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png +++ b/images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9c7651966f0fc77e9ce09373a401ffb7903285f16e91a7e85973e4c5c8bf1ed -size 800344 +oid sha256:864e2448ca16760b9829a19acb66b527e3f8018e924c9e4b2e4d286e3812f58c +size 703608 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png index 7518300d0f11f9c57cd39c6529e3fb9f0028957e..5aa012a5d998eecf76374b6453f233bd4b7e5f31 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e269b819afdbb90a7034f003b9bc6f3f2a399127df093feb6106247da666e865 -size 2620227 +oid sha256:13f4927221d9d0df29f8610d7d4b78cf6d0d042c0515f23d54ea1e83c3055f77 +size 1730290 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png index 89ef2692288e3e6f103d9554d2b8b2e5b85c659e..844820a06ab4eb33609bc0f7ae32e907790f6944 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3974de0b9c3e9ef15456c2bbb6af93d5953ed2b3287c0c674129493e99e5b94 -size 943870 +oid sha256:39581d4f03479dc9fbc934d17ad69f535c8a73e7e70271b73f99c2167b0d996f +size 841558 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png index ffff3ed533187e2eb54bca550f9e270bb0f8617f..eac815b73e5008d471cd88f930827b94cee6c611 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4eb8d898b6e6b3bdcf344630572d8c99039ea61843732bed25d4e5c212336399 -size 312561 +oid sha256:7753695a21fead5339fca2840b820159feb799217bd7bc958b5cf1b85bc78f3f +size 459812 diff --git 
a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png index 32841afffd87fc5f6478040fbaaad705e6d47961..d473dab2540904466f332ebbd193d7a13c4d06e4 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bc67b860e0e0c811f1d9bc6657078f924dfd9642d0b4e18dca1da51614cc935 -size 659747 +oid sha256:4cf083de7abf267af9144d58d934c102e8ba15cdfef85dc01463bb5318d044a4 +size 317451 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png index a1981ad304ddedb4f847ede67217cdbe67a07db1..3843fc41f45b469ea6603dbe94762d1c6b6622a6 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28e4fc800ee0fcad1790441b55cc497122f4f3a08c34d598feb677c1ee4ed832 -size 564641 +oid sha256:5f47ba4a4ba7346e8b4c008cc18bf8e41f0c9764a2eb688af03e5d4dea358fe6 +size 808588 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png index 74d05a51b0434c0347e021572ba5677edec6edab..76f9160aee1c8572e4ac64968d433372820545a4 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfd6ac0b63a96194f23b663c8d9308a082f64f67d28e2f688e4b746b7707b5eb -size 487911 +oid sha256:c68fb381133a0fc129eaa0bf01b7ba5d3eed1dc081618e2e36d11c2ef220096e +size 619804 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png index 0202289e22e0a0c94be61ba13daeaec688889e0b..8044c57e871f4e3f331e9c96f802cca9923d7e04 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83a926db14f8cb0962dd0fdd63f8e6ec14bbec9762b4b5ed3d9a3c9e87e241a3 -size 439957 +oid sha256:699a036515b9187bbf884774dbf7ce573e21a2f6c5c0c2c484f1c6a44902d6c3 +size 459369 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png index 62bd26b254fc8767ed064597a52e1b5fba4b85e6..b239f430911f0a9ff3989436c0dddae46494a96d 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ebb85a716270e2da64672a767438168123e67a1c6858ada4dfda08373e3db89 -size 693439 +oid sha256:e7a94d3a3c3e1807b04f69af55dc77c2972401f41a6f970feeb42aca3de771e6 +size 967142 diff --git 
a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png index 5a3caeafcbbdd6234d726db277c26175c96b29d9..d7166804cf39552e81cfd425cb184bbbf5480aac 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d32bb09d785a7a5c90f3c19d6fe8eec8d6aac52dd35e60cc61b112e45ff8ea48 -size 1702468 +oid sha256:baf58871afc9da5cd3039afa15cdd4ac39cda693a2d756d3924840aaa7bcdc7d +size 1764579 diff --git a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png index 8ea088050c3a449c9efe75cbf64e5023abb51a92..a951386822daa5d78c76ea98ce62ba212ab111be 100644 --- a/images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png +++ b/images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57a4820ae702d0e5c70fda6e4cffaaf4742302d2b6a216c3bb0d6abdd47d5a61 -size 295902 +oid sha256:59460660a5249f256fb28ca7749eac98361aa6ec1ca8749a573acc633f8c3df8 +size 242882 diff --git a/images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png b/images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png index b50b37d18a5934e67fb187101ed26705514e9e6c..6c6d4a9a159a4eac77c0ec289130085057a60a0e 100644 --- a/images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png +++ b/images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b9ab25122209d8c8bcafd3b6c5c473420392325d3fe147ddb56683e7b92a493 -size 2034890 +oid sha256:04ffd9dc02f21f6aa3b70654b2f3a0dee410b86b7759fb61bf6979a91910ce55 +size 1946572 diff --git a/images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png b/images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png index 2cf13bff1db6de4bad2fb94be3d8fe9d3828989c..db910d39bd13be86edcd5fb48e937cbdc402fc49 100644 --- a/images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png +++ b/images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3811cb45670b07140bfb6518051dfe06b90632c45e787e085c6843f460bad99 -size 2039373 +oid sha256:ab16a009fb07b0b5e705996d11173da851ae9dcbc8015478e142bc893f4b5158 +size 1293982 diff --git a/images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png b/images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png index ad0d8992570cfa32bc0342d7aca135567a7042e2..7ded5473596133abd7ada7d675a59547674cedfc 100644 --- a/images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png +++ b/images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:754541bd1258befe5d1baa95c8a4144e57984384bba01e5e7dc7649e7d0aa873 -size 2023224 +oid sha256:0b57bd51a14782634fd27ad55253b46bf9198f77c74426820f0e19d4b7690b61 +size 2173989 diff --git 
a/images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png index c9ca2add758bc22183150886423c747e614cc7e9..b1d03a1058c1423a9f8691f7e08883c2f2e85b82 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56556cbb69c16ac4195d1752981c2dd8d2276d35f8755e351688b990a80e2ffe -size 1541109 +oid sha256:6ef1df8c28d7a8b141eb8c87e8649b5a56ec3f0401925880294838b59f2576f2 +size 605034 diff --git a/images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png index 803d7f778de00e8808235ef0c074215f7f3b5feb..50cc2f0eb2abf18b410776d80ed05483c1272b80 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:550a4ddc41f70ea82d03a410ea0cce709fb23bf27aa4792a96dcef4b041bd371 -size 1113546 +oid sha256:00a5b89db0c874184d597084f09264cf7b0256379d4d59ae9261713296f6c0fe +size 1027967 diff --git a/images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png index f53889f33636679adf9b26afe21c1e41024bc07f..88141ab1d047727a8835af91c3ff2f7eceb1f1fc 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd3af1558e9a750e0c2c13e32162fce8ad72015e29c5b32ae7370bcf3529899e -size 2389732 +oid sha256:20b3ceef5954bab2648f482069b40f044ff7a9b07ba131724285e70f850a1cbf +size 1288931 diff --git a/images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png index e2dc7d526355c60e98bdfae954b6b850d00deb37..c0ab16b36cc316fc99d5e043788a80d2853825c0 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:016f04c63db4ce86251bc855a358bdbb99c770bb4702428e8b56e8820e0f6525 -size 918739 +oid sha256:c5f6953e8d21aebcf51535890b617d70366113c91a32fcfab37c38a65e5e0a07 +size 658243 diff --git a/images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png index 9a4db98c4a017b17cfbbeb817174901cb76c2632..b47c7a9f0b2ef52cd2ab77bc2c5af11b13b7650c 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e173d1a9b9466497fc7ecc1290cb39eb172ef43f6c34d269b8b615e5046254f -size 1928670 +oid sha256:4a15fb145d262e02be0cd390ad65d130f52aca6cf04e2135081f97a2784030ea +size 843524 diff --git 
a/images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png b/images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png index e20732c3ff596c28c75031b9c46681ae82e9652e..a9cae2225de866aa3c34ebc5129855c64088510d 100644 --- a/images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png +++ b/images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b81d7de0be77b76b97206f786c3b01a7dd6839cbe628a26eb914608b90c1035 -size 1308040 +oid sha256:9839350842fea79f0cd17ec414d2807f567c8dc3ff4dbd7398c6ed3276b5d546 +size 1119782 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png index cf6e1ce6b56d133f85ef348a27fe53b639fa6b12..7fe052f7c635798b16cfa93316ceb06e599174de 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c72a9f2c894890f6c0a7c19a13a025955fd0ee0ace2fad2b881bf0d8c5a67019 -size 1080337 +oid sha256:c885d84eac7142782b3bf09d9e1b396f522655461a134d11cd04dfb03a887a42 +size 945847 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png index a50c134a7512dcaff942f0d1b8c580eba8ca7e19..04a8ac841bb252eabd5d6609b2bd621013d63d58 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0890d057cce0fc53aef6bf8a023dfd7fdffe756fef4bc8ce9cbb8c75e7acdc7 -size 1067405 +oid sha256:d4d58168e24377e917e9f691ed1f2373bedd3a223cd9a5a70eed29284649e62d +size 1235466 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png index 30ab20c172c0546fd5ef1ae59effa2029056f0c2..59f34179f38f48950967c051a3cfe0a8e7cdab16 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:744a10367932ca572799aba8067de3d3445e8f37ed2a52f003f76109cb4385f9 -size 805234 +oid sha256:b62c2f39a55430cb63e7a0bb44b2e790ef66fb85a13d428edcfa88e96adc79e5 +size 1251888 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png index b548486724c5e8dcfa29e2347a90b28cae018de7..45fae6a4b312b5509d6d0d2f0f27b4ab1ef931fb 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd51de446f6c49ffaf774a00cb583c05ff1f3fb62883bb0efdbb15ac7dcfb4f0 -size 1080468 +oid sha256:f76ad5388d95cdfb47a7679b73dba78d6a9c0cdc2e8b92a6cbb56d2c2d3f14a7 +size 780815 diff --git 
a/images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png index 6e35724ae8fa2ca289416fbe39774c15b3b59291..00b8ceb0c3a6cf270898593bf498618fe2ccacd6 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:588d8b5bf843fb385cd7f05e49d9ccb490124ddbf50865c04a7e6597fa91d6f4 -size 1091965 +oid sha256:3033ddf1fe1c6934e0939fda87f5f28c2f2fd861bfc4b72f8e244ef2051eddb2 +size 817834 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png index ca3cf7af9563865aad2ebc36fb3569e734392003..6f1a09ad9fe8013f90272089b5cda72508023aff 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18c53647062b8ce663e27a54958f31781f40414c68e719019ce1389983ec12e9 -size 1055935 +oid sha256:9cccd1b517dc61b576c8116a5de2f8f074f3ea578e48a8b377369ee84878d21d +size 1219825 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png index 4de2e300f5d550bb265eec7d38c5b59de9cea8f0..55f2a826bfc5686f3d263104e4c5fd60e5ba8336 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66934091698d3bc29a35df9e33eab662685112ec3fbc097a415196e7a3a3ba4b -size 1037089 +oid sha256:976e564930b8031eab1b8565a2ac6fdde3ac6a5429095d26c3c3ab21c85ef4ad +size 1007518 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png index 830a9ad0e4701c284fd98c305b2e85a22086859d..4ef8ae04fcb3613e58b2c027c35a542c41d565be 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45e920cb9b307467c995ec47cbf43b78a408d1ce86e64dcf7c9f596050d0280c -size 1086375 +oid sha256:5839886815440040af95b0444809b3341cdc8f148b286d3dfc18960eebdd0929 +size 1288103 diff --git a/images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png index e3eab01112f97b3420bb220fcb4ad3efd150a700..b32db61d4f662ed845683d667a85df85659d6ba8 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07bf27d2020ae3491f28526ed147a400fa2fb3e1d732543182836bff5ada747c -size 1079964 +oid sha256:f7f8b0f8821c8952d2094ed7801447fe4252b3efd5636e9c14b980e4abac5d5d +size 1281781 diff --git 
a/images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png b/images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png index b5781694d3d4df1ee787ec4cf874c359c9f6143e..b6f9a92089d09152b6abf8e6f02510a7b7f51909 100644 --- a/images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png +++ b/images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa46b4f7d76d69e0279a113639d639cd665dcbfaa7f631ba869dfd7276d0c5dd -size 1084335 +oid sha256:30db4e6d0710bd1729a8b606422c3f29b5d7d76c39cbb636f6b4bac8b6ba4c3c +size 1286089 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png index e4eddec1db369f6b8c7ea5905899c3479c21c04a..a095b63a382d5c775e3b92e4f27480dd83559b0f 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc18c5b19bc70bbde769778513b36caf09b2ad06692abe7e0f3df235f31a0327 -size 511474 +oid sha256:f0ba97617c0f3fe08b139b8a545ad9288e885205c3c0d387a625f5b871e86565 +size 362677 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png index 300beff769bee362147abf1e85428d59cdbb9815..adcd06da3140a903af3d01be458e816377448905 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:005b63e75e75bbd1f1442d144e6cde3d7b9ff1152c387d322015130be8e7d78f -size 400126 +oid sha256:60299116c6ab84d4d504a77fe1261e31aeb9d43d0ab1d4d0823d0d65b65d458f +size 610496 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png index 3c17253032d2bfaa9c60d56f0e48932c67080df8..da21b9bd5bf7c20a48afeb157cd961bdeaf0c5c0 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90ba3eda8db3350dd034adb051d670de02be2d75d4992613c7b74793ee34b456 -size 554503 +oid sha256:62a3791f1f504ce97928dae20f0ce03160c70258a7bc594a6fb532f520bb4b55 +size 730871 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png index c3b7a95655ebf7e42fe0e6cfaa70ff26abcad08b..1a0bf9b7ed8188e84de49a743b349997afa0f648 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59c03688c0a4cbe60280a993ad41eec69555a1e6ff03bb9b48e30949059ba138 -size 513212 +oid sha256:79c7d9dcf60156be64faf76b86623e1638c37756ba90d63111fb4603442dfa39 +size 492274 diff --git 
a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png index 76f7d6387d376d3a14a742ba4898739b0286dc31..b5ce6c6595e5c02f81ae69888ef435cad8db23ba 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79df6e5910041c20357640f4cbb21ba357a455b38222f0436dc218e1e2cbad95 -size 490372 +oid sha256:0fcf89cbce59b676cc5af82ca20f33f2a41e6798486d841327c45fdd52a47969 +size 305066 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png index 915fc644e6d365ef77afa26c79bb82c498a4e9d3..934e7f8662979292c174910ce53afb58f4daf05f 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ff7493223e30ffff89830e6692dc9661ee4dc737b536433080b348ef8099568 -size 373265 +oid sha256:9552274d38e6c13e4e5550e590827c91a61c627e40196ba3b3832f299ecacda5 +size 339799 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png index 677f18ad6d71b88150453b9340cd102a1d6a7f87..14460c0daf8836da49ab728fc387c1537f6c4f63 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:635ce51b70d8a13c809c4c5697036ea820d5d0e08b2e4028dd6c20df86b141a5 -size 379002 +oid sha256:212920c676af304183d48d181fcc8789330e94f811e130398825546a11b1a19a +size 294027 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png index eeedb3b0da8c3547d38a08bb1b16451c56a99a97..c7920cd9be9a2941d90c9b708eadc2191ca4561a 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c65a00024902464c769044b88b026b4f7940bda15da4cdcb74c462f4623d7d0a -size 371934 +oid sha256:3ac2dc9f25d0f1a842c9c8cf11962ab03be082ad8d06a0897324edaebb76589f +size 637165 diff --git a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png index 75cbe27fbef3d9581e76a1fc6890d8982dd997ef..744bb11d4c9763e22e2b70398e54363bd3bf0705 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15f0c467abaa0c243d0db001c375555ba2d758eca2bd8003223f8b744e697f1f -size 373470 +oid sha256:461043f96d359c82f89d5e6d0d2943779396ec145388f731b31c2cf00bc74848 +size 229727 diff --git 
a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png index 902208ea350cc254157d61749c122e998161130e..8df4397e7a8a98dfc0098d39b74035fe40e52557 100644 --- a/images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png +++ b/images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5258e7adb1134bd231fcc1d36b0a5d4070b9524e121a860c83c35fce71ad3d9 -size 800764 +oid sha256:5afc368e91338be9a50c9c31903e2343e95c6a0e98860709d24a47a7cdf43f0a +size 559392 diff --git a/images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png b/images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png index d4c1a2953483205507d3eea93314881f02a8f723..e46ed203937e21b98afe08d52263d45b2eccec04 100644 --- a/images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png +++ b/images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1515efd0d591e399948a6b9c2f0c445aa8037426a00c4820829066ccdc55a88e -size 406009 +oid sha256:6d5fe019f9814159c610a4c3bcb2c3348ac78d6e4a2f571f474776db2466355f +size 407343 diff --git a/images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png b/images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png index 2fef297662b5811dce680d67d3e314c5d89e16d7..380a5541fb6c1278cfa69d61b245099caa04ac87 100644 --- a/images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png +++ b/images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63936683209ca70f2d184204cd4b594eee36fb09f25aeeb85cd18267b0a00daf -size 286594 +oid sha256:28003a9edeeef953eb73ec108e4e2135cc561118bb9584683481ea3a2737e7f2 +size 311171 diff --git a/images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png b/images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png index f13a71d657308eeb7b450716702f6f1935b01fef..1f17328f51847bf5a022a0f2d946ea813cba73f5 100644 --- a/images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png +++ b/images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d86ff89dd9a594241aead5c1142334086a6e76d871a91e7ad6dde6cd04c73de9 -size 501500 +oid sha256:f80733fee8cc61579910fbac3bb95841fa06499468810fa46de1f8969f245f4e +size 401527 diff --git a/images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png b/images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png index b608beea0118f720860cf49bd4cd6bc06b9ddabc..fa822a953758d05f5cba4147ad03eef9f3ad4803 100644 --- a/images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png +++ b/images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81f74cff57a1eb8101d51d5e5bde8abc817aca6f3ae74128e229692a3f2f9625 -size 1328983 +oid sha256:25bff4e0d8ad87b96830e020fcb5741c40594f98cfb6ef096f9066f30962064f +size 1447742 diff --git 
a/images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png b/images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png index b2186e58bcd562fcd1ac3772a497b794ab27e55c..037cd8ec6da1326740656291515b92d01859b019 100644 --- a/images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png +++ b/images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c129ded3140144e833dca2218e985bc2555985e0af2290ab7373b4db64b75787 -size 286087 +oid sha256:fc2c552dc89fab366f77756c94bb95ef256a85cf4465edab7657073c0cbf3974 +size 446497 diff --git a/images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png b/images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png index 5aab8ddcec5c3067a4ec83285e0c9b8dba3a1391..502cde380bf3558c261ae467eb1b747a196feba7 100644 --- a/images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png +++ b/images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53a468718719ad32f380f8221ffe21d29b6d139deacba2a31ba5327da4a1f58b -size 871982 +oid sha256:d553e14febf1c1eb7475d9231717fd75fc6ef822b98c1fc60f3f06cdb1dbd4aa +size 1221653 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png index 38f4bb1512c56c54291bdc58d11d70324540163e..99e8f027bb423a73e0e819eef4c07760effed5b2 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02347415fea87e1942999e40b8221ed27e6137f139e31114668c973667d92de4 -size 681222 +oid sha256:5b8a7e7054fef7ae6fe6abf5dc67dffad09fcef8c268b0c050754b38c2d7db42 +size 248984 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png index b61832d35940151ff74e22cca3de9c9d84f03d0f..c0c9ea5afd76943754f5aa2497f8d58858ec5b02 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2010b3b97f49a08bafb8ed6397485b1dfecf91729c816cea4e801bc88c17913 -size 532478 +oid sha256:dad23f02062afd044bbd96a4dfe9138e82c872d23f47666846d7402f84d51ccb +size 248012 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png index 84dfe07d597ee748193eb78b1ceabc10af385964..db56b37ff05f8c383f9a0f751bdec0766404b73b 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91d054f6f37b810039cedbfa514caa7e930fbebdb3ceb541ba88ec8a317ab145 -size 682592 +oid sha256:9c7ee4d6a025409ff2be9e0794849e53d19ada056a96be57051d7f9b737332d3 +size 476778 diff --git 
a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png index 405cb66bc41d75f5172ffecce3099c1d8dd4f3af..59e4fb212899a41377ab0bbc19e62ea2e016b22f 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6502b973fc288d3a21a9d1c6184929b4352f56e70e0eb4e89a4d69adb92f4535 -size 684399 +oid sha256:afcb2d80509e90a7b91485d0d88c06c4031d9a1305a9a664106732ef050c91be +size 291169 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png index 5b142163717c97c1b7dd9adc6b25bbca4ce86ae3..e247c0dba76aab4862e22a1d496d144e338d0fc6 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdfdb85e2a8d61600b9639b9f004a9f877cb3c3ed46945a130e82ad450d1d80d -size 666672 +oid sha256:ca68a442184e3b9b2558e0d40c391b38e671e1c30c00a6014f438601d4fda3dc +size 320438 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png index 46b55fb128faf1de07523fcda6773d2f70272431..5355198c81eb63be8a693eb1344a3c6515e2859e 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a6cd4e832095a356e6fc24a2f5d21e3b418963d7a558b91d0f741c87c814a9a -size 455348 +oid sha256:f71ff298b8965d15170e476dbe886966300419e7ca16845beed4876dad5d4b5a +size 458781 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png index 38b0bdd1a2af0243d923f504f4af0a22978c00c7..03c8b76302b89b5bea16046541aae7303e1e109b 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:818fe982a902b8d439638d64e0cb23613cfd46d340f5b724c0168df22e4d0045 -size 675441 +oid sha256:92d82fd154f0ff5562b2663dcd3a7ddbc86e0bd5102aeeff9b4196b9e05110e4 +size 233951 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png index 4fcfe360eec1283a0de932f556b58fa14f52ccff..d8bd581b688a4d33a31916d21eed6b9c5eeeebb6 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef24cf9fb17479407dbb3b5e21ce478b0c450ff0243f9640b18579d99271269f -size 645494 +oid sha256:8c257cb8b04eb1b36756a937dcbe2f2de11f7b8008e7644e8886472991d35158 +size 583394 diff --git 
a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png index 80f72b4c3bfef6cd6157c3cdfb7d057d1777871a..2debd0b1d7203d0b196665b2d4e504410bc04d57 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcc82a1a366485f7ecdcbbb98aec4f93d0b66fcbbe623716973a2ac2d347818f -size 671151 +oid sha256:07a7e605c1491bb630532f3cb92fae500bdfc4e861141e38f02ee8b89cec0b3a +size 394881 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png index b2f34c9d2bc1f2198388102bb14015ff2762fe66..a7acdea2925daf2fb571942a36fe6499e79e7888 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa5ed5dd612c4860cce6a262053e8119309494f59f8f7b141e81e5a7a2143f32 -size 677032 +oid sha256:fac52e24d496fab3a6c7e2bbd3062621f355e30861aae7605d337165dc7497a3 +size 210135 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png index e4a810420e7e0d0c0dddae25e9c011fdd93d29b8..0c8c5591e364474fe14cac6b6941996ac6f3b3c2 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b043673afecf0921cb0379e4fa657b798301d531cd2e8f4feba71776486f5b2c -size 1484424 +oid sha256:71208226e5582a62112da54c32dc215dc880c95d96336d86039ba959f70413fe +size 1145175 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png index d7ec7c0e64ef086640299141fc58304c5a0e6a1c..f553c8eb3aad57d8f751b564075820982f0c8154 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d9be92c03ba96c638bc3cc6ee6aeaf6a5b9d99d65bf19eb43de1a391df86efc -size 527712 +oid sha256:4079aecf78e18add9c6b188967aa56bff240b0c282bad332c39cbf3073e9350e +size 336628 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png index dac58e7117847c7f1883f5c88a1dc6936b245b9d..3816e947427beed86724c9790da2b3ef769e7e21 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d2134383880d1de4df8dca0d8aaea86db155d7cff1219b0b07390ae03412eb1 -size 652619 +oid sha256:fc3c6216e66485197f2901122d13bd82ea77b7e32da00837ee9993a4e1f42bbd +size 360258 diff --git 
a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png index 9156c9e06048b69a3ceffa9711994ab9d63b0b58..96fd263264a379562dd1d51b0ba33a3fb6440521 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29ae502f45574a444bd30aea3733ae65a7325436ea6d053e01e7714b7c75772a -size 492322 +oid sha256:043ab1cf79fe81f48129acf8aa0d440d24cd7fbb498cebcd1f22330234a836d4 +size 412868 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png index 6678a158a43c2b8264edc348f372c0a72d5aff7c..45788a2caa056b2e17162ca262e95d4921ef750d 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41aef6c76d654cdd17caa7045d7f67d8470957b9db365bbf190521204dfe8a23 -size 687715 +oid sha256:f269fe62624a81b69424613597f36cf50c2b30912d4b8a72955b1456fdfcb59c +size 316485 diff --git a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png index a1aa0abea6d369a4cc413e20d332b6ac4fb0a8bc..c4fba405cf285be0d8edf5bbeaf91aa287185d7b 100644 --- a/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png +++ b/images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c168afff9774b9aea4cd5090932828eefe930c441879e1e0e69ff26b5895230 -size 579243 +oid sha256:f896ffac49bf369c5e3feb28c7d716662c995ea7d6231ca75357ce69f2130945 +size 387109 diff --git a/images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png b/images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png index 949abd2df43a470bed639095c4b8cd13dffa6f5d..6dba53ef40674d4cfe89d64209437f3e748f6f47 100644 --- a/images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png +++ b/images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4cbf4e722b14f4d1e370bf1dbf59bf60a2834872758550dc550c13cc79a25623 -size 1220300 +oid sha256:c9a80d3fd5a0605e02db45749a93b2ff94d08035b466cf8727e2dac57f691138 +size 1158661 diff --git a/images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png b/images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png index b401482277d777e6d05992219733febe027adb64..f3f0237d192db588e6125d34d460e4673baae4ae 100644 --- a/images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png +++ b/images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:220575fd3f20580233795e21bd5cdf394c904daa8252c30dc00598e13abbcc0d -size 1751957 +oid sha256:d16ae7b47d4ed08c07a9952998dd4f6fe49262c09443aaefe76c7d0b659c051c +size 1985173 diff --git 
a/images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png b/images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png index 0d482da69ed200b48eac1af47da0989737279279..80285b0eb8f0349f21489e2c05551ae410ab3fe0 100644 --- a/images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png +++ b/images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:343b119a8597ba80a61c1ec5adec1bea3e7faa34ff4a32db635e31cb646a4ee9 -size 1754117 +oid sha256:49f0e40fdd3fd1a5e0d9e5342ff23bfd6e9d7278fa1ab86c3d8f9748180aa85a +size 2026186 diff --git a/images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png b/images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png index 8fc459d3a333d4e713cd29a3ca246cdca0a5183e..d52adaf2a48fa5f64dffdacf13661215a9855e0f 100644 --- a/images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png +++ b/images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5eafdfb85ca7fbcf1f68ca4962b45b30512de7892e06c7e349d9219263e67a9e -size 617002 +oid sha256:f59804ac04e99d3014439c9331a4cc3a36ecb15bff0af8707cc4b785268f55e3 +size 731721 diff --git a/images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png b/images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png index e6c9e8b7e32cbec90b426575cebfbf7b09c6932b..7dab2721c7b1da356065cf49f7536429f3683b53 100644 --- a/images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png +++ b/images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:088674eab60d606966d15fdea559d0de26e40ab98b54fede5084028b9efe957a -size 1191538 +oid sha256:ad693fd856852e4f2f5bedd9a94b4ffdb7bcb3e9a0485196752f4d27cb2906a0 +size 1156819 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png index 2f61e14ca9fe097d55bdeb227de27b8254c681c6..41c3108557e277ed9a5bd138e0f33cf4c54bdb6b 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f733355426aac354d01a3a7bae3f27a6aa40a578840ca10457a1ea0064cbf671 -size 754736 +oid sha256:bc3d86e836eceb27b12cc7aa19d34f22e07a5a8e256bd57852e06a2ff975a35f +size 796246 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png index 9ea367975cf3f21e224aa7ab6467dd066262dca1..7660b58fd53d0996ab5d6d8cbb60c284bf19518e 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0240be5b005bff2b70351f9cfba010d598cf02ae673411a37795a60d764e3dc7 -size 1058602 +oid sha256:303115a712d21db87f357e3fba9ca3b3653a38ea35285f19fffcfb0b0ee006a4 +size 989121 diff --git 
a/images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png index 894925fa368765084278d8fbb5708719ef20d3d5..ba2168d6d0c056960df061ea9f17a62767d10e99 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc2d07b45494f62389b2b7b0d3870c57e98061d15f24bb6a15d851dcf6d8b1ee -size 1684063 +oid sha256:41d2efe28e30c228eadd8d838fbb6f14a7c7421a9ed1d222768ff505b6bf2072 +size 1703418 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png index 6f19d6982fb7736d4c1e677a89cf495ec5906636..9b4a1837a5096b5fa61809d195751721f68cb959 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3cef1e8d207eb15bb40e7a6dd33f33697bfbacf76cb130e9b659fb4a074a1c1 -size 1400016 +oid sha256:e375aea14a6c8eb0e6e35ca32ba514a744ce7edfd7f34b9b223895ed55ab2458 +size 1346823 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png index 7c593711b9a9ca60880901d87a120c19e4a22b8f..0f3e95ed03b22dbbf71a1b08800325a9e85bcd99 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e3c8fcba71d62a765e92787586d7dc574b9670fc15a8c2478d40105cff22dbb -size 936179 +oid sha256:5651037b9464a883cf415dd35a5ea5afe3795e5c429e7233a98827a6de8268f5 +size 796204 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png index cfc64d6586bfdb0b0ee28916507f30a388a1e24e..a0d62b73244a9c5653dd4a6a352b943ed7900685 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cd92616e054977ff2f335c0ddbfbe5bdb6b7a0b441aa2d482205bbf95329344 -size 829354 +oid sha256:0009b605d340fde8c42fa5fb8350cd6408c753223143fbe46e180d3f6527800b +size 1142798 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png index f33458a446a9b1e06c7620520bd7b4526f67662c..7089217ac76b907214dac5c3b404fa26992e83c6 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63994372703b84b373c2d7f8312e6e107df790c50a8dfec24fb5ecd564638e3e -size 896866 +oid sha256:591cf1f96f66fb3f70cab24b3e427054d1a4f8e58944e7f6d63ba36b51e31b1c +size 893388 diff --git 
a/images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png index fd2a5c3dacd2b27bf99da9f6df6213df528bb389..608fb6cc406761c50b851f22344953d535242ca9 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54a0b82faf64867d9f237ec13432433274b879487473df07e91543601efd1664 -size 690565 +oid sha256:a8046de505e37680592c90b5600ed30a9ffa8945d5413a11e35d11a793a9a0d9 +size 854871 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png index 1b08a3708af594b4b06942d03a2c3ffd5ac8dde5..2caadfc3af5047d7dfc6fabed90015ce339c2894 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3b0f57eb7038af78119da3d138162b727af3bd551da14f418eee84279c17f59 -size 901094 +oid sha256:7957f536698692933ca2b09d1330d7bc13f0c321f40503dfd7e98f530d5576aa +size 653416 diff --git a/images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png b/images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png index 89835d98421f1e78cc3c4a817ad231fa801c63bd..059f7a2b4ee19abf25f5f1c4be796a8d13dfe881 100644 --- a/images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png +++ b/images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57633453686d310fb8c929700d83c73aa52768957c6c7a5d1c926caf7f1ff0d4 -size 736474 +oid sha256:bdc60e42c4a076a78eee6d84cb40c3d7acd422834db3c6548b284911265e44f6 +size 1022323 diff --git a/images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png b/images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png index b048c20b761e642cda512403fc3ca6ba16d203bd..52bf8522c422335a74ead40012f3122e179272dc 100644 --- a/images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png +++ b/images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:754b7214afbbbb87f50bede997ef0ef5f57bcb8b7e808a859ee97e39f2eb40bc -size 708594 +oid sha256:ccde07427d1ba61f04e41577ab974091afb0919d05f73dd56fd869302639cd52 +size 568848 diff --git a/images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png b/images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..b0a85e9de5c5d617d8ca0b1421a6d9ad59d713f7 100644 --- a/images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png +++ b/images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:bf2a0e54a927dc829c25ae46352d90b8e9740e7b1471dcec907af22225aec0a0 +size 1084568 diff --git 
a/images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png b/images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..ab1e1e97d136231a0536ae1c1da1580fbdc794d5 100644 --- a/images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png +++ b/images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:fceab109522193d27512e9ddb97cfc91382e946185c29c9223b094fcfcb54b22 +size 1599549 diff --git a/images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png b/images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..19375eb804353a83afcafdaddbe59f3dbf83ee6f 100644 --- a/images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png +++ b/images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:198d6c606c3db803c6a4fbe61ab125068b70c0eea1c7aed6ca0ef5c36c633d37 +size 807845 diff --git a/images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png b/images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png index 3023212c02ab66dc7f0a88ab304f17f814d1b41c..891bc37690c0a84a336ffb5be222f098fd131fbf 100644 --- a/images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png +++ b/images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bfd5e61898bf5e37cd5e2fd07d072bd1f17024046f5f18ff78c1b973f8c6d0f -size 546893 +oid sha256:1d9a748940110e3c8d03b61559832b61c06dbd7a1a0fc969c88e9c1b90f4036a +size 499742 diff --git a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png index 207d9546f627f01b1c7d7969c6980d4cc67dce47..12deae4af5ea8b465e66a4d0a11aad01ebcf9fd4 100644 --- a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png +++ b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd214313340c26ff4b9e760acdc19cd0aae7f8379b0160884788e2ce8ece9f53 -size 861264 +oid sha256:ff19ad7a83ee34a68559d07bf31551b9752921bdc66ce9caedce2456617c45ec +size 1062556 diff --git a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png index 89b8ac6689cac18a833b694d34821a413c9f127a..7fb1cf60c677814a3a4bd672968a546f9a2eac03 100644 --- a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png +++ b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16df21a7067f62e2ea5983bafc4fabee4ee3f9481fcd997e040458c2868bd5e3 -size 858168 +oid sha256:8006c0f0ce9554c3b59655e4d6aacd2c3cdbf27b680ebb643efcafd696f7fada +size 799060 diff --git 
a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png index ec8f0bb7fb262d1d05e7474e6776077911f5b3bb..d6818c8c19a901d383486a37738d2bb8769a27c8 100644 --- a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png +++ b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48bcddc98450132206d4e5dc82cd50c31209701c39b92b3ba9f14d7774197fba -size 586881 +oid sha256:391339e8b9711f3d4f7f262847099be337f857f77d0b71b8fa4ae7ead676fc70 +size 804791 diff --git a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png index a165fb7a481cee007c83a90a79474dcf17e57b0a..c57aaf5d1d3887c24190ec3feddb8f978b0f4f38 100644 --- a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png +++ b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef97095fb373c15d0b21efff2c48f4591c6bbace8e56d4ebba666119dc4c0a55 -size 860145 +oid sha256:e7a475fd3b517a8706ecbf8c1c65f4da8f5d1500a88cb7512ee764e1530dab37 +size 925942 diff --git a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png index 7a41cc88559c9d5754083f07e55a00b506cf660d..e119798240506ca6efe159a754686e17f93fc1d4 100644 --- a/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png +++ b/images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9ef801c67f65b2887dfe830ee84deef878d584257d449b08df0b5618278b9d6 -size 782288 +oid sha256:cb8f9b497675ccd346f09d97be3625f425e05c8bdb2d5844bf346f8ff063cfa1 +size 751787 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png index bd5d00e569c248bfa4a7ac41829d246228f23dfd..38510896d0a827fdc5191e19ce25fdd1589b551d 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c6551f92b8768884eefd0eda58fa8531a04454d0b25648a7c44fd0670757a2d -size 863172 +oid sha256:84ab1a362746bd2a231cd408b8bcdcd18638f941c7cd3721ee3128b74725afb5 +size 1048086 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png index 535b5519ae9ce3454cff0c63c06f82f76754f6c2..8f4504bdde34f2512172dec3b16f7f4e0dc8b593 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a51f7bb5b4c9b8e0e89fd4e154ee93d01447b16fe35670d52cf90339d17d0bba -size 864903 +oid sha256:fc7247eca6e5e4b72c360da45336a69d667f2e4f2f22934c69becfdf455b826d +size 968169 diff --git 
a/images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png index 3f7244d8ed7ce1edff0be59823f01363071b446b..5f1059fa20929c5ce8ff85669c9dfae7506c68dd 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d46c7d1dc6270c98606664577d0615ac74cf34fccd4d213d8f6794d05dda9497 -size 847042 +oid sha256:e8c565351efe31b8d2a34d0191e7544198f8e0b97ef7644b3b66a406b56ce6bb +size 772505 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png index 6751cf5225d5ec81380af60068603174eb11463f..d4e7b17f56aa443bef4b08c3779a1b23417f3d12 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba306ae80b5ac6d24850ede587dece7dc68735ac8229115b7e7713dd7e6cf587 -size 752233 +oid sha256:4f054729a07dd0aa1677d4b50c13ea86c8d68411d4322ea2683d0ddb39fab15e +size 997656 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png index dd74ef041bee175e845244322cab7e7f0c4f7119..13d721800424ff61c78284de69d608b881886908 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e208c8f9640cb213c9d9d709a999c299b1f4096032cb875ec33a96e01125db56 -size 751800 +oid sha256:5c77311fe9720e29a95f1297366387e8bba383de435921602aebaf7be6ed206b +size 562272 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png index 23035324d142c5fac5ee70c6e23cb6267857a819..035b6fa8b82896ec55afc24e8449339366c7b9f9 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c6c830592ecd8a789ff71b667fbe7b54563d014a4e4cb9070fe2ad46aaa34ad -size 1348988 +oid sha256:504923838d52aaca5f54673a982ca0d7370162f9a96b3c7fea98fcce5c23e2e5 +size 1362484 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png index 4ade20dd43d3dcfe6c3af2b1920f2689e32579d8..7446b1727c47615309eb01e737f583a80ca704b2 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28e1a040214b31a1a91ea1a10fd29b7dd11d8070fa61ab68cae8813aead5ead2 -size 858317 +oid sha256:3ab2860c02f40070eb2babf11648d3488256d8a9f43e2cfd2db73be86b12e6ad +size 965751 diff --git 
a/images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png index 7cd5de3976fd82ecdcf3293d63650e9e0eef7736..7991da085686bcb8dce0d5b8def2f601601b4ac8 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2fc7c47404a12a31c6d7f3d196b36af6b6198b289af6b86e15b7a55de8442168 -size 864817 +oid sha256:2b22459668567c41d3d6a1102aaaf92e342ff63de09a6c9ead4b8fab4db6be52 +size 1158178 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png index 21651d45d6ae7bea77e394557e4af7012e6eb4ef..129cfdfe8195fb85122b1190c4b9a29f18cdde01 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b5322fecbbef1a91e4e383a990cd7f6b777afdd662304082c206aabd2d7945 -size 865338 +oid sha256:4ccafda5e484c303b63445dbd9d16b0d6690d0e59fdcabe6dc254d5d4b5c8c9d +size 1159074 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png index c944bf83d4165e678e327065556d97a892d0c7f5..117598df556aef31631d511eb658ab7c9e2ee7e3 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd2c7ae89aae50f716471eb93e32c1b44184e4b783e62afb1df470c006524a6a -size 861995 +oid sha256:38b811e742064060eab5fe2f9fdd4792b76d64d7d9bb2dfa56bc6f614b8434b0 +size 1045622 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png index f7a214b812003eba9890b0829735540c3f6b930a..1367f52636ef1b4e97310f57f03e3daaa9d864f2 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65225413f0254cdbb89889fe52b535f6a8d42ea8f0addf69e4a5b96a03ac133b -size 1520440 +oid sha256:f0fb2b84715f4c78b3c0159acda46d628291633b4af7743e005e00f0217f0dbf +size 1728529 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png index c9df471bfe0d2a03ab37535f2d7f634073f15ede..a15258465114608467926b45e2561ffc58b61187 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a930fc1735c52858d9e0750a80df13dd1f6f4a396f1d123570fb4daee4d1836 -size 924239 +oid sha256:3aca944b4edf8f53165b8a99d31873b77f1f458a371453177f638099c3edf838 +size 842800 diff --git 
a/images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png index c62b902eac3bf1336a1d7f9a6a9842c4a5777b0d..4e775188f4099bb78729ed198a8f90964fcba137 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d099787a040ba619d25d8dd92d1081cd1f4b120f05c3f1ba1574116ca74233f9 -size 812715 +oid sha256:21ce291c77a4aef2fd1e3fddb8e5c899c90e3f9043cfdc748831ec10f8ba18b5 +size 759828 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png index 29ec7560f1e0cff97b7d064ad9ca274494b136e8..3e14372891aa6144280bf36baffb493968fed936 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f755c3bd5f7e6c3d43401301b475f6e18b2a7534f06cdbb9271f058104d76f5 -size 825582 +oid sha256:906318eca1838b440236285583d7b22d6a353d1cee82ca6514f7a16a96ccba5e +size 1087808 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png index 72613d8d36161464745fcba583f444d3e500d3b7..6deddca7017c2d954bde77b86503616499fc8205 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3a9050221fd473f5601a16cba446a8d80049e476b77d259feafe4d1f2260734 -size 857529 +oid sha256:7a62fc16b91a346b104718284ad99eba06bc9d73a57b055eccd7f1f9b715cfca +size 937298 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png index e2b42ebf1894bc36b2fd2b757d8bb94bc030c344..65f8ed709f96b85321bab7c093eb71de021cf09c 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ae6bebc0f52a961d2f471abb882f11ccbb01f0805ec0c09da694fed821cbac5 -size 1529328 +oid sha256:090a99578b349fab8c40356aa2e45cbbe50296f915f65b4948d2d71f79bfd343 +size 1205257 diff --git a/images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png b/images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png index ce580ec32c112145f8e5d28459ea35882247c810..4af8071bafbdbf504f27140b55b03d22fa94db92 100644 --- a/images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png +++ b/images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1491ee0b51b621ebce204add1a2f33b9775ace886539bc87eeff437f4fc0f398 -size 832361 +oid sha256:932f26a224e7e72eff696af2825be6a48162c22df726eff0f4eeaca21107dbc3 +size 1080895 diff --git 
a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png index 86f6542948a414fb85d5abf476ef595332c652a9..334944f20bb26c1cdcab759eb140a8b70ec77faf 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc2149e8a27528d48f6a5378f6f9fa769f540d3ceb409a41abb9c345245ff824 -size 1379683 +oid sha256:a73578472fd3aa6201c1cf260003887bb9c2a7be08bbebedbd2e8c193d9cdbad +size 1339897 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png index b09a808dd625bc93071086d01b8523addb1e9413..c387d9289b5587a1aa359e2c474a2199531c4cd1 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4aa26a477de1e07e2fb492d51bbb7cf6dedf0b21b924da99c26cd27112d66fdd -size 1466965 +oid sha256:e7a4f6617e14a1c7137042765498f632799a0efa5cc9a38c960b4f2376a577ad +size 1072318 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png index f31f79afaecd3786c8bfa3e006917b775a609200..ccb55cdd9d88526dfed061441100248829c90281 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:512d653ce1f764b71357bd67b38bd6496c6d582d173ba32cfe57bdb4f6e20be8 -size 1378759 +oid sha256:bbf19736fddea88a0eee63259f3330e9e89897c1c17a59101cf16261ced2ba14 +size 1471063 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png index 4cc76b9342af0f6ff50b51a1e558411fa5be62fe..b8779f5f92af6dde3f5ba92a5d0af86a813b97b5 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c063507e0ed88c70241f2420d906eb7e62780a0e9f15f38b02edaa49cf3b4324 -size 1448883 +oid sha256:194e44905f980a0f5c2c4e8654d9cc029f7c6bfdde33596777d550245237447a +size 2134577 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png index 590d5d1fb313e66fbc6e4e93bd9b630f83f2fb70..fe96c6e9e83ad093f8c757ccee6b5d3f0411e6fd 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c7558e36e8c4f4a6290f51a3c30fa9ab1db0e6bb7c5bb426c41fa8771f9accd -size 1449828 +oid sha256:76d6d957cc80bd653a35922d1324dba7cc5fc9af99a5143502353965012485c1 +size 1505146 diff --git 
a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png index 963d88437ca0af1067f885af361a24e896e2881e..560d6ec9e9f57e1dda258b99e97cc7ec6f5f4e40 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22745dc1047f2c0d3606b96ae24c4302d443e5bb06046ffdffe38e9b0e548644 -size 1448664 +oid sha256:ffb347bec6d61b84a60e54491bd8412a3fd87e3d0389c2d197bebd9df3d7f088 +size 1318196 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png index 79585b9e8444cd607c4104c44f8abb41350726d9..9ca97b28ea5e8f2f2f118ba936d49da67780e37c 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4c268c1d1af1e846cef4d1b5aa2920b5240d5425fd6f5c5eed631a4848d842b -size 1371878 +oid sha256:239885d7552a442bd09cea0514155fb0df92088ae4cdb8f8ec735d86cb664c6a +size 1280141 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png index 712b84fafe451261ca2e9cf38938e66f9f156e55..fb7b90946d48a99fbba8ccc3206c2ee70d31b071 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85f8c8d75b5099077a8925ba69c1ddf80f96516f30dd1dfdfec168365ff6d48a -size 2102900 +oid sha256:e389a009cff3cdadf7ec3da87c81e4c5290cd9cfb3b25ec346425a5235fef196 +size 2120796 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png index 29abf8a96b6e171c789680a031ba3b8409dfae35..49d44fb0ca3a2feebf9a579e3884f1d30d35b617 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4dbde13d6e4b5fc5ab88e962cf36f94e2d90a02d4040b19cee91d4fa7523f8d3 -size 1511007 +oid sha256:03287d8f8e9895083659b2a55cc141a871d15795736b57867ea9f3be9cdcbeaf +size 1641140 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png index 757c9360b88d61020ec0f98985e2f5dce1700ca6..c0f1cb3440d05d9394adb4efdf715715c65f9c2d 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b964db1d506b2fa05148b0225cbb85d9ab55a80392c0ad4e3bdfecbcfd1309fb -size 1451933 +oid sha256:96c1c769e105d5190471a2adc493cf5656ca14277a10ed10abec31bc3d9d6262 +size 889807 diff --git 
a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png index c1fa86ed03509ebca70143c00568dcf5c29c8181..c93e950d5d3cdc1819d49382e6c85f41ae4e8f6a 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9af07914b6bff1c4bcac6aa0d46ff9dfbda6020198bdf8371ac02978aa63faac -size 1447060 +oid sha256:72402f0d343b639992f7a44886f79ffeaccd81abd941c6b733abcc24788e8104 +size 1184238 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png index 0fc19cdaa12e72216934ab780969bd3cfdcf2d1d..c894addcfb85fc4e7b92305bf7e622c36eb3a51c 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a9fccde37527e149e1d45e8194d948abceef0749c169fcc89a4f1da808f411c -size 1452317 +oid sha256:06c962d532b3becebdaea45fc82b0d7c40524ec8f518e092152b7a266a78ccfa +size 1032778 diff --git a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png index 22936f36662d8526f9a22b1f6fced63fe08a3596..9c08b07f818751bf63a171c6fbffd3089f6e3cd2 100644 --- a/images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png +++ b/images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19fd1b6499be25aebfc5eabe709f1567654c60b3581c7699af6e5a19b71a6371 -size 1438818 +oid sha256:5af310be0d1349fc99cc684a9851f64691a5a8f6b25cf028084f86960af1a131 +size 859003 diff --git a/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png b/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png index 5564c17b687f5e07306795b0fd6684c799a10d7c..a4444abc3ca1f38a559071a5bfbe18a64e633d79 100644 --- a/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png +++ b/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:460daeae722074bf179c44c0fb9e55d94d1ff397ecd975120b07684d5edb85a3 -size 1289001 +oid sha256:a46c09a3f1316236a218ad589eaa657084cd4d1a07f2a29aecefea0889164479 +size 1957456 diff --git a/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png b/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png index 543e951fc6018784a57a68e55a7e64c663835d1c..743c926fb7941aeff8fa99d28d01bb5b15823d37 100644 --- a/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png +++ b/images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d501a6c8fb103633d7051ce6c201ff8bf974faae4217109d5c51ca50a7bdcd2f -size 1195678 +oid sha256:2cb6f1e63ee9725a4504e4f42ac1c17c74221f62d04268b977f7a56c4a2f8d66 +size 1298852 diff --git 
a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png index 640a88c92458dd5e9db6fae44a057fb0ac6318e4..75e3a8e0cbed50bd15f669a36066319236079992 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea6fd53d7df8a5fd2c7b78814109e64ff81eb09f81e4474eab2abd40b73fa73f -size 314077 +oid sha256:45f248b16b5245cbdf642b3cb8b202a1c2ba0e3ab048925fb3e0850734930492 +size 312596 diff --git a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png index 0db58a0e26f1668a8f94502992b3bc275acd3b5c..21b4d25547537c71a79049f672464712d6931862 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bbe52ea600e6c726323a666f76f94944929bbc623a0df254b1c03b92da3708a -size 1776924 +oid sha256:6db837cf3a98a60ce95e8d1493bc91e0e41e5567d154d68c9093232f65c41633 +size 937204 diff --git a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png index 751de6217bd3a36b74dd738ee38e02f9aadf8225..f2a3d6fae11f3e1a30e45d97f3d44aa8c7ef3c88 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6909d5ec73da4d0286580dc76ccbd05453ed4ca33e54faf41df7b0bb4537b61 -size 277970 +oid sha256:0856e31991f62653c93dcaad8537f6c5f321c1b335750e7caf1c5cb90a046ec9 +size 259901 diff --git a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png index 3f80a1c25439157d7baab48d1195d23837572930..d09cf48d8e66a63334256dc60534e66640bd9937 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8c97e1ac7c9fe8926fb1f817cd4e14da1018891cb7557d6b60fffc365bacf5d -size 480850 +oid sha256:06274586f254f6d759dec572e89fc3f6a95d5ee02a03851386acc4bad9ef4625 +size 474517 diff --git a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png index 440a41ebc43d797f4ce000b14758f03389bc58ff..3a1b118af796f1cbf965ca40af8b71b66401d1eb 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6668e334ea27024ed3657262c4462571ab80c9f800e5673b6fc70665ab668a97 -size 410889 +oid sha256:f3fcf69b5c646e14409755b75d36173137c8c57783fb288e65aa2f37a285c58b +size 392330 diff --git 
a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png index dfd21b7ab3ae46b760dd654d9abed58db7fc1b5b..0f45b2755315ff52d137c65307e62a03f51bca12 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c254149859a68f86a1a98d1f058b6d13ced1c4bda4ca61b649e55094e591646 -size 278180 +oid sha256:356f68434a79812aff02780b6a337ac36ca7d188718a16d7129925bfe65b674d +size 283575 diff --git a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png index 5eb8e1ffaedc8b0ff65de04937e4bb84ecf876a8..d47b8100dceee8406d0462f6b96cd4adf9d1d2be 100644 --- a/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png +++ b/images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:918f60cf447fcde7c698e9bc31ef6a5647f50bd4a0356029f350fef6abc7d005 -size 270747 +oid sha256:fdba0d8bb0402c6d7512e9969931411723c8f52a9b02c171dc768fa723e3ac72 +size 347091 diff --git a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png index 8b4b2a26a8afc8190ed0f2703e2c3e10f972e854..d7ff157203c1a589ac8f9e830ea22b2a32175aa0 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e027d1d72996ef970c49a04750e7fdb2f70382af95d442fd24b0dc877252275 -size 1512007 +oid sha256:fb4b83c6d2bc6f23a0611ffbd148274b5d63baf71c182d76453b27e6443d0113 +size 1093976 diff --git a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png index ad126d42c527d0591199bd728df37bc32ef0d7da..ba95ef05ad5718fae8455f48e34fae5d3d2b4615 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36d5363407b190b9c39d802aa5139fa9854fe8c38c7dca8b24d44a2a71242b72 -size 635684 +oid sha256:2b3e33046281bc766b1b295caf35d82bc8880d2fa4c9befc2f58e03e6439f667 +size 781340 diff --git a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png index 8add8dbf462e8aec6f87c7df6f9ab56df3abe598..207738b5b3818cc4ffd99bb3f3203c65118c315b 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6e51b97953607ba81bcce6a5bc4dfe0743a9574942b757ff72c1bdf5f39ca31 -size 602510 +oid sha256:d74667ca1c28e385203c2740525888144942ecd9b796b825e1d5416fe7904e34 +size 775917 diff --git 
a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png index 82347ee7a1c7482cb7e9282ad4540074d2ffd77a..91f8dace78c174fff539a1717f56cdac973dcf36 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54b75bf833e842f22ed7a3abac542e63aa998b46227dd3ca9d90f4b6d8b1d703 -size 654194 +oid sha256:d0207d3c084371a56dc1547bee93cfafa51c6827e7fe795c115e997926a44d1d +size 712388 diff --git a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png index ab50de93537db8ffe54b9ea6e19f3a16ea281309..246e193c8401b7aedb5c27e99fae2540dc7729ea 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9741da636c4a4885860adffda550964f4b00b71ab20fc2c4db9271bb262d2aa2 -size 613742 +oid sha256:fcc1c1c3caa4d9c0adc039464122b27057b0ae741116b8f6ffc02a2adf5465ba +size 754627 diff --git a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png index 68d27b6aa15f9843f181d642aeb888b42c52029c..c9a388d0980df8153ae72541fc2829e962f14c9e 100644 --- a/images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png +++ b/images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10fa4ec0a4cf4a980a657df116ce9490e79e363fb8744d991e335652a79bc416 -size 1467416 +oid sha256:b3922fb8cb55c0211c1697c7bf90f5912179400779b2578e8b764505720a3544 +size 623173 diff --git a/images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png b/images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png index 67e38e17b0c551c309b4ff3a7cb85dd69c993293..8863048081da7765b08a73d373a0f36e1a181f06 100644 --- a/images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png +++ b/images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8636e2117cb8d6cf9cafa935c68fedd8a3df3fc78a07032ea37ea5d41615cbe7 -size 654839 +oid sha256:f1e19bf646f7d694fe8a84c77497f43741e6bb0763c608eb609bd2be3f3912aa +size 858056 diff --git a/images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png b/images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png index 7dc0837068b75b9c0c3dbde148dd96282f1847f2..3bb069642b0be9cf1ac74529a60930356677e730 100644 --- a/images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png +++ b/images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9eab22c72d761449d6f71cca37435b450c052c77fe2f08f039c7078f11c7cee1 -size 872010 +oid sha256:9023432606c6fdbfa972a312ae764c658ca2801306d6f81e7fab352fbb76a50d +size 860270 diff --git 
a/images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png b/images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png index 922f6e91818b380913d6efda66da0b8f297a6965..57d7f27e9c7e748441c89af83d9b28863ed50d58 100644 --- a/images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png +++ b/images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37157bc8968722ab788e3338925d6459c320e9cfcb9e13c4ac53f2bc7d383539 -size 2216617 +oid sha256:cbe32574fbf8e89e4bc6074c03b23a58f2eed88333730ce1700fb5f4bab6ee9d +size 1191473 diff --git a/images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png b/images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png index 502be544754b7eabe98a1e804ea675daac200171..73fa37d0e9dcc33b5d1979a3f3109fb8ce706e2f 100644 --- a/images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png +++ b/images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2935447a6c792bf294a25a19a44a9beb70fc42e4267f73f0ec79b3b5e3d9727 -size 964375 +oid sha256:9a2911dd6c8b6212a134b3b94794894a9804db67e00807b8f817f5f18fe5579e +size 1256624 diff --git a/images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png b/images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png index 96565eb30ec34d5702fa8986a9478f616833306c..377d98847f8789baca75d99248297ed0f4b4dc83 100644 --- a/images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png +++ b/images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfa8f7a116264357bf01db383f4ec03f7472130debf1871709a330f8a1fd8184 -size 942539 +oid sha256:3c511eb90fff6748195561aa25b6e2f3157d7a625676540eae4e4a04039ef484 +size 937857 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png b/images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png index d072bf9b80c3be9c3ce616d1c0ca48cc8f712787..e6316a054d56f7453f3a6b948135fab0541581ea 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e844b0187a56bc89421169eb18ad89b5a04a8d31a1e01986ea60a12fe63d7f58 -size 1576234 +oid sha256:f4ad298ac5f70b2cc16f1eade7e737a95a72123ba4bf9e69edbaa24ed36e2842 +size 3120323 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png b/images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png index bbfe1aedec8c731deec25e47a0b448dca61c2b39..e83f85bfee4461d188203e75a1e424c9d2c9bdfc 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba76683eee186a5ced943b3fed5b447290084204e010698704b592762196ece9 -size 1974148 +oid sha256:c1c465f2c5e9f19e14a4dbf7a9265c9a321779c0063f0d1464f7cfbdeb95a9a3 +size 2326378 diff --git 
a/images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png b/images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png index dc85ad95f0f6aad3626705c00bc3f8ffb2f111fb..930f45f49c380b4586e3120fdcf855afe641291c 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42a6d0a717328dbbdce3005765ee2112bf9a89b70596036b49dd1fed77609686 -size 799757 +oid sha256:be0278cdf10bd033f8cffe4d0530348fe97333e559be292c960e2c5d2a884f58 +size 1315515 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png b/images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png index 5ce41a5b372212d5cb2cb6bd9c52ebe8f9f6a8de..3c54109bd3c4e959f4a0d77e5d853a5d4c61a6bf 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:338e092856a39757c2df2eb55bd9af83140af26dcb7b92756cfe226aeb8660c7 -size 1539827 +oid sha256:01e2fd26383766589f73a14a5b61a5f877e38d89e932fa592e18a1dcfd00323c +size 1775246 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png b/images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png index 4118fdbf6627f00bbdaec2b167adffb54472ecdf..c5e7a9db331d8562b75349ebb888726507684db1 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54ba0e0ca82b6f8e8eaeb5e2d6485f47ab07330b1f29f66f26505cee8ad3e292 -size 1404390 +oid sha256:4589e441799fdf0ef1415b389745e75535a071083774df14a385f0a0f5d6f6f3 +size 1666470 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png b/images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png index 6fb904d4c8cffdfcd87ab74ed411bd2bd4028dfb..f6c7801f72039a215fc244f6a627230211dc80d7 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af7181fff9ef9a1f3c22ebcc70def36e14e636fb0aff27c7f3ba105ab1e7a22f -size 798284 +oid sha256:ff651fc4b2509c520fa7e75003eed619c28722bea0f960cb73a09b76662e1fbd +size 881112 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png b/images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png index 4364bb7897a2e9db373d5a31353c457131a21acb..ed294f0027d4924e5331f4097fabfde170e3d4fc 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:211af46d116d75805c5081f9d1ac97948667b3be5e17d86b03e09429e74fe6a7 -size 1466226 +oid sha256:a47f86b3335f25feb5519f6646b762694804df680c35b7873bcec3e90d570624 +size 1336646 diff --git 
a/images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png b/images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png index ae7481c77012a2b6fa0fbb6c14b5f4872b1d9cab..72a1c05e38d68b102a062b688fb7ce418c8ee3b3 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58875cb0a4c7f27b681ca2b662e008af4bc7b5c1f51baa290a6ce6ba06b48264 -size 1691637 +oid sha256:05244e1935698d4474a570ebda1a749b7a50cdf849c1301bc74e51e7b2904e41 +size 1493450 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png b/images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png index 42667aa2980e1a95171633227dba6b0288455c3c..7b7f38714bdcaad1f75df7da75e68a45879c09f2 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:470951cea845412cce800689a799ceaa724be314996ec7b312b4f773e4a2e915 -size 1757088 +oid sha256:5d1886da0d6b9ea6206a8dc12cdd9a249a068693ec4dc82c5311c5b3caa81b32 +size 3393999 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png b/images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png index 2bd299e0547c2b867638ba3b24361fc288ae9766..da35df73682ca6203fa48684ee24a16c6832e656 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58bec696eb1b6c138f9275358d943594bc2536b528c7f711cf861eb82394481a -size 1419566 +oid sha256:6b776f66447f2b496f29ab58af5ffc18eaa4acf4a66209ca457c75bf9fda81dc +size 1236318 diff --git a/images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png b/images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png index 7602055eb445dcc9607f57a22f93d21a4a5bad10..2cb52e68bf587a80d4159c61914e84d56b4b2bb9 100644 --- a/images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png +++ b/images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04fbd005d069d4a197f8cb312edbcf0778e53eea1f17dc444cafd83cbb475952 -size 798722 +oid sha256:4072068cfffba03ad6a95b82a20fe7a709d46f00696b7af2598f75d903f45d21 +size 1300012 diff --git a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png index 0eb5407f81264a108cd3978b38e306a306cbe2a7..4e354d4e442a56c5a9df1194ed9cb081d0734e61 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30802c8cc3303ffd759dc835c74ffb867761572aa7b98208683e52cd7eb0592d -size 486220 +oid sha256:5a1200add70e1dbd6638ec171fcdf8ee1ddfc4ff3afea25cdcdb5c776161ccd6 +size 389336 diff --git 
a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png index 52c2b2c488a3d6ed10a8530f88bb2b3f3b3d4ced..429c2211c7edd76a565949979c6379891a25b6c9 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8507a769be24a631c795e3dc5d60ea20343549ac063c46ad675376d44df333b2 -size 418370 +oid sha256:c613a7b2a6fcc600f110e8fd43613b21e423c512dc69e88a21694058cf809640 +size 626011 diff --git a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png index 41f308b767c454c70f67fcdcb3ea952a2567b462..3d340d5396c5ddbe80855f611d4835b609f79e97 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be410ea1ffaaf4bb7a7be58e48fd82e1b765ab597fbd93891722d3f20264b28b -size 582690 +oid sha256:afe6e0dfbb84fe035ec76f95325e163562316e55e90acf29819433da3d335a7f +size 534795 diff --git a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png index 42918b03e3cdec51b48b11f3b10909d971df8f65..0bcc412be83395627f4a663c46c85e8cf8972000 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fa87a1d506881ffc00a76f52cb0eb7d6614311fabac061f58de89cd3f382881 -size 1658524 +oid sha256:7559c688938cdf440aec7954b4be3f7698ac84fde4833da6ab944b77df8707bd +size 879477 diff --git a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png index 3a245ea2c42e10b1271a94e6912a5738bb2ce1fa..3bda60e5727c5ba79de21948958892c8b765547b 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c33654fadc20c3d65de7eb0e8b084e8eb3e1916042c441af5c4e07b99f6bfaa1 -size 1664214 +oid sha256:233055408cccd03f3052d17db71fc87af39d30abbf4bbe4438b1a107d296a69e +size 846190 diff --git a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png index fde9207c4ae2972b982d2d0f458d6bed98708ed7..ff1c5f84c0d084fbad5b60c768de3a2334bea76c 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d8195db52713b53505f3bbf49382cb9a6e5e4ecdf888c770d06219d9fa259d0 -size 472458 +oid sha256:0e19753d754f24300a5d6a7e6e327fe078066848f9a36c8db88fc2264981a012 +size 446375 diff --git 
a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png index a1bcac4b4d45d35cde03fbfa026545d43114f041..6ff8ef917c91c0d50e8d7d67dfd02420d2990337 100644 --- a/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png +++ b/images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16c215f6e731b0e02b9d1ef12139ddb5d1beeb5e95db4128196c6ed29728dd6d -size 578272 +oid sha256:fad52bc9df394903f2fd3b5e2a0364cdcbd22314b1b08399d18748b4ac421d2b +size 638935 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png b/images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png index 36b2fb464715e5a0e5408b6e6cd3724eb804945b..26e65158647fcb98ec9f6c119eed4e425764acd2 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86a60d2d17f5c835254855533de0c3f8851dab131a59f3833d2224a714734fdd -size 2411530 +oid sha256:ffdc1c509172a76b78737fbaaa4bb06e9780ed9fba1a2a7c9d69b0cdf55d08d9 +size 598542 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png b/images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png index c09b03705618125fefe236a5da5cf1c8ed303ccc..4942c70aaece39c264c2217203eca31c5346261c 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9b31df367413a15700529f7e146d492f5243e751ad9dd1c79130a337be3350d -size 331736 +oid sha256:962f2e1457d9c1d7bd7ba342c08bfedf1e2e10bcbed9ea0506e328d89c73e1e7 +size 336859 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png b/images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png index 5efeebf2e460af1b6b0fbda5e724823b9da77135..5753be13bc97770a95f9c5723301dac9d584dc55 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80ae65c05b273c2a56b10ca7a4a5869459f44d768c837b9e46f5f72c6728cbbe -size 333713 +oid sha256:9fdd3f7a9ba090a6858858b635558717c73bd26971799af594b94a72187b0643 +size 338834 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png b/images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png index 8aa70a0eac22830ac8732b296c03df39129f65f4..8cd9d14b66dd546b39643db66bfe7ee7c883f698 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db36c39c2c491985970672b6424821fa6fad828f7f895e2f3043fee161adbb26 -size 323722 +oid sha256:040d9df031e7bbde45705be64e8706ded80a03a3d7b14e4785a065416f6d1165 +size 328812 diff --git 
a/images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png b/images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png index 227a8c12cf1fe898ffab0810976de7219fb896dd..0686184aa444502a923152f0a23647c31b708c98 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a2855a23f45a4b4bbae9538ce69cf321daff3bebb1113de7c891292e4ca1051 -size 326126 +oid sha256:7f42da949c662d341cf3390971c7e794cf0f3df7d9aa384679bff8c902e826b6 +size 304062 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png b/images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png index a1e6410ed22b24af9be15bbd11af5362e154f956..7cc119547c841f936b33162290993c28b1ce22e0 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28341ca42e449e62309aa2d71c08eb9999b3b5168ac48027ebaf20ed4c696dcb -size 340413 +oid sha256:f9d0bbed322b964446ad8ea0d3aa7d99dfa6a43e28f7d76cde313bae66b9bfec +size 345532 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png b/images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png index 2b0c98d1a41e99745b849320e7af5804d2043b8e..e9bee32a75faa98d8aa7f8b504c4aa819587370d 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8265d901d5737330a5738f8f85f4d5271d0273ac50cb95ac531ae1cd438d7dc0 -size 336207 +oid sha256:7f4bc47fbb12835cb52969b81632ad6e4fdcfe03b744652f0acc0926d9158254 +size 341330 diff --git a/images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png b/images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png index 1a1db90613bfb8e99d14dace505ce1e691efd8ef..b5b0de26d589b1a8b9cd4ee2b0c9b0738d7b3042 100644 --- a/images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png +++ b/images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61c82f8f58a378321bb7a32bcd3ee559517df3c92a296fbc2e63cce78f10c52a -size 345576 +oid sha256:0fa48c99612a38305b59915b669f6d31511cca85b510d9a180d075535553a4d8 +size 350704 diff --git a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png index 68e706863e2b6be500b406d8bc3308730474b75a..93b5b8b062d893085fb3d48d2c54420cf882cc11 100644 --- a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png +++ b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f964fd6f88acfcb737a9df33ca2d04ce8accc26182f8298dd3ff315b221cf699 -size 716393 +oid sha256:298de64dc9a56b30162ff382a23b80cc38f0560b42862612f3d4c13cde7897cb +size 783306 diff --git 
a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png index 91cc73d6f4cc414122502533206c711b85000055..208785f9639b6b209b4ea61b64642794c60feb05 100644 --- a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png +++ b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61b5865453578bf8d904442b6087d297b57fe8bde424d091b42a8ec0e9cb3d0d -size 709190 +oid sha256:2c41947b5d2f27929f95c97fb976481aa615518c812c12782f9f59cdec6a5dbf +size 709097 diff --git a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png index fdcf07d045035b891782f32e4ecb25f79fd6431f..77e3a260c632de918176362923cc544b91096492 100644 --- a/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png +++ b/images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a1c5af2e3fad9edc923cd509fd1d32cbd36422c72adf1474ecca3625e4a3ad4 -size 598911 +oid sha256:35c1e1563f7a7f02201705cce48f8039c6fa66abca4bf57d79750ec1ddd5bd74 +size 648894 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png index c47c2be7d2551be12c275d7d3d62b53f69fbfdbb..18b4754958839d800b55bc7aefd0743211369e04 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7df02c6259cabc6fe35fe16592df1ff1548989c9cb4ff8883e2bfd400bb22433 -size 1057271 +oid sha256:4f0253b471fd6266db81ccc72f84368bb9cac082fe2c95df9536315977ef098d +size 1052347 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png index 6e33efc1be4fbb562cfb8cf2acfb03cec4c90a25..679fbcd7b1e4adcffe5582232f188f7c1c11e1bd 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba7f7d6fc4f77e81b5ce42b97c3416fadddd1da68dd319d778ab0667ea92e74c -size 1137093 +oid sha256:866341c6610c5488c6e736716f5179dd168b5472f92db44eccbd353d660934ab +size 1041706 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png index 7d2eaab8cb65525f1359135527d0c2d7b92eab56..6e56c962941fde973e71e73e34f82f8e833b1249 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b82fbec2993f812d0ee4b447cda56fdf1bf777d70862267e54c3b356d79cca78 -size 1188571 +oid sha256:9b5c971a3e6febcb22f5f96b0a57620cd9a4aef32832079eb3c38019cc40ed02 +size 1105021 diff --git 
a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png index b97f9733a912ffb2799a379907eb268087aaf353..b4c49cb8febcde92d31c71edbfafa9d04f6587a1 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7c018c025104982cc229b6e0c015b9aedf7db765b359255a44d51d2ddd2c8cc -size 527893 +oid sha256:c93218c52b75b30c574370e46950db843ea91b2796f4bc303b78ca5e6555e42c +size 652936 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png index c685a908ab5b593dcfafac0b3292f81048a38459..c1bbdabce806517f7c776c15ef5f79ab9f11e95f 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b25d1ecd4142dbcf766384579878ec66e79d06e712a0801f7248609add1c509 -size 756335 +oid sha256:4ef92cb586c19d0eb1a9dedc47cbb9308200670d7acd89e236f16aca884154fb +size 693883 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png index 251e4afdca187d02f8eea5f28781c08841a59925..e4cd5a0f58b49fa2bccdb6f89f042af35853764b 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e90bb4c6b91c70f4260dbf2f541af3c05b89a2e8b1559c23a782372359780e9e -size 537758 +oid sha256:c2079c7e5e78fc12469f7ffa4466367dadc6940f0f1a2101e666379ba61b1e0c +size 601713 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png index 0293e9a3984d1869bf3486886fe1a6b193b8e777..2f21d435877a5d0437eece5b8516d38ff5e80646 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a431e7b69b0ad29e0f73d90d56fa3f125e6de44d0ce5d11f449f2f8b36ad89c3 -size 1543889 +oid sha256:d822b2e16e4ef6794675a180a790ac98b64fdd1f762874020132d9ed036c6505 +size 1599695 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png index 638ade5d2c85f92d9dab49ddab4f151a106f2afb..60a1b0e5c705f2e8348b219c06c09819c0f207ff 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:badb29ff80b169adb9254b829d592de39a1100df68be38b806d7609c389557f2 -size 1242217 +oid sha256:89f1f74786291f4ac114fbcb8ca19571d237088dd746d7185a4d3129ce2581b7 +size 1371503 diff --git 
a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png index 4c9a69b3460bbccde5102b9533377a5132bfb2aa..449184884e37a32d27f14e06581908307ae5a888 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:518c743e9c820177c2f7e1a3f7765c3f30fe5b74a71f109eecca555d6c2a4665 -size 1563247 +oid sha256:7657ba7e173c54cadcca07164c3f5028a9d1bf839caafc2eed9595ecad56fae6 +size 1532146 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png index 327b3c956890c3e29b12f128ea3a5221b0c9d424..2a7cc0180e611399a94d6297e9474f26704e3555 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:888720320d31e55415576a8249f5dbf98dc7e23028aee60bf7c979f687f341e9 -size 1556761 +oid sha256:b620a86a1af931165b0241bc76ae5fa215d467253f43a17ae6539f5f00db5d03 +size 1637690 diff --git a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png index 79ba3abfffa3f0731028e1b8bcca13898bbd3b1d..314bc2dc296c6a3c29584f509af053911c26f095 100644 --- a/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png +++ b/images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57b190d90844670ff8ebd7d2c1910e655b1685718a248e36ab3ed6b173d37aa8 -size 925120 +oid sha256:650845c8d162c3f8ec61a1dc616d91184dd70617f50663f30f4c00269dd8652b +size 867582 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png index c6647df4147a22cd73c101fb0db5076343b26447..12f4284abba2a0179fbee99daa82c7cdfc52ca0d 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef4e03888f9d5e21ec0141726c5701262f036bfd884246ba384a3e894c0408ba -size 934941 +oid sha256:ff6ae7388cd959865c1ab398d39eeba6ba1762837bae4790cbe57ef1652b617f +size 1060655 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png index a4b85daa5f084f0d2559f7d4efaa9c0358332f46..c6312ae6b334e15218e17aaaf2a2259c5d003b2d 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f848f96cd2d6b08147e0ffb374faeda87701ad35cdb16cda11dcde16f8cc5af2 -size 1324224 +oid sha256:bb2d2d4a218cc08a4043722477fc8165ddc61d9efcf2c6b1f55e299dc6ef3f4b +size 1159013 diff --git 
a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png index 1ca262e50d3550debebe1c2438fa818b53ea576c..0bff96cb64c42465d424efc47651dd07b262084d 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7fcde2fce46cdf2a0fad7bd073909df6d4ebac78f41f1041043d98e815debd8 -size 1073202 +oid sha256:acdbc50501a5b52ca8f28ca1daee6a6d1f11476c8643738ec049c8301ea7e9b7 +size 1192205 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png index a26916d0c4341d86f2231a5679013f3100b1eeb6..b60f5e27cc9768fdfa23a1ff1928b25bfd1aec4b 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb95db69a3a8f5879c9291b5fcd1819d1e8c3221b074ba5afd7a70b866894400 -size 1819360 +oid sha256:f9c3f76b18d5406725e71e2fb19b523ea2889431979b368d5359e0bd849f6d8a +size 1772544 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png index 9bfb8f244690d08229540c092780b123ea499530..5df41bec3a6b4468dc6cd29074810897c4feba5f 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbcd0f589113601c46b32bb80c14d83b18c8fffdbcc7606799199ba519b73f5a -size 1163524 +oid sha256:8f87a5e9977230d31869a01055cdbe58de2eae768bad1f2c34878ee9b2a7fcf1 +size 1087576 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png index 746f7c78f85567ef6276d9afe82f4671de6abd25..89660b28ceacc471c9a73cf45ed8a002ecf63e86 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:222b1ffa2010f5402fd803a858643652ea613cedb7b55fdeb27e24f8fa77af3a -size 1278968 +oid sha256:9e381b5a474240c02ea47a53e7d7d2bcb0a43b6f009aee49c92453e934e7ea27 +size 793690 diff --git a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png index 0292cbfa2accaf6f8199d9f50679a2102206e4c7..6221c64e7a2689af018ba2d670c46ed6ac16606b 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfe11647f48cbf004c590203aa9ca56c46af1cc7c7564e772257633b5c24af55 -size 1447702 +oid sha256:8d9ff6cca36a22a19bd9bcf2ac8ca89b46976acdd0590bf9344909cf52a8229b +size 1193789 diff --git 
a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png index 20613deb623980dad099b171c182759f66a08863..8e2510fe8b386084f70c7f0db926221108d02e5d 100644 --- a/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png +++ b/images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3c9be0bbbaa16677c43d360b70bfa4e87686500dc04afb3e6a567b2b895a236 -size 1032544 +oid sha256:dbdda37a7adf2cb34ec29d1810248729393763cd21087fb0a90c9ab92985f19e +size 744781 diff --git a/images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png b/images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png index 2683bec7a14d75d409ac13bbaf9de2dad1438ac4..07fd9790791d720fef45c8b61b53335815fb2a81 100644 --- a/images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png +++ b/images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d393f64456ee343d492e4588d30928a15a6eb565e46bfc9b8705e0164d3096d9 -size 165005 +oid sha256:07fc35c47216b2fe7d3a48d5582cc203ddf64d3119f7d76b53b11e66f748734d +size 164261 diff --git a/images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png b/images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png index a7e6cc24e66395c4cbd8ac92d70b1dd3e22b538f..08c38973272c8843c2bdd341fc684af26e309c08 100644 --- a/images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png +++ b/images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc57bcf646de3d8108ef69be58f8865d61150b73c484b10aca9eacb8d52a50fb -size 156734 +oid sha256:1703800a989a2194b71f2d57fe3f059c23fd160f51f34a9d56a535cf953d270c +size 172900 diff --git a/images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png b/images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png index 5ddb1c23c2b69e0edce4dd400f1e5b9115a5eb6c..27218a3a31e1ea8a5d77c00e9a5277b176ae1c89 100644 --- a/images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png +++ b/images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b789cc49a77fa45d13d41181ed33c112dbaddbe7730b681030a9e5d7c2f09265 -size 400579 +oid sha256:d3262da6695ac5b372e2e307ab397cb102397b19b571c32ec0f27596bdc55d49 +size 433073 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png index a31881284952f0189faee285d67d8d83e10708e8..535c93813cdf1499073789a5284d4c05a4174cd4 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c2b34f87714a0082b8df221244799245bba89e0ad5472837e90ea087b5fc9bd -size 709578 +oid sha256:8c2465b24bd33d8e265ba670d23afdf6fcdd3582de1d8ae06631783018e634a7 +size 711818 diff --git 
a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png index 9933d9299372220d59401972da7eb7d48c0add5d..33479e0a988be1511f85520b46c4cf6c91c1c829 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9307c2f06259f655a248c8552b69b873af04537a499e840cd7142a31dd2dcad -size 356395 +oid sha256:d5b3c1949deb56b1dc2fd683f801ebffe2e30aa0e73dfc64af8c8f905d87b0e2 +size 768381 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png index 028988997e7b524e61c5815bf3e4cb339d82e7e8..b16f63e30c983c76b083f9dcc624373b299979da 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87af546e83de40c7f6d64c8c6c20d5be8157e0e9e26874255fbe64d8943886cf -size 1266073 +oid sha256:25a7f0df0f34a21064305fb26a4d39f4a6691b345dfa167fcfb6088face5232d +size 415519 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png index 4927608e77cef6756db491f8c5c0734dda743aed..fa7ebe764d8bf07c3c0ee4c1a1512086ada0ff42 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e140e6d2743a9cd9a5fcd7dba44801d155c3fa09e52ade7ee56135bfaf700bc0 -size 969267 +oid sha256:3a8298df0fb8494b48a6281d964f31155c99a238ae19843b26b4c883b3190a41 +size 316137 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png index 92bd8808f5fb04d661bf2884c3d2d48b2f95af28..4eef396d8655de8fa666aba676f4df75ffd7a8e0 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a01290c8831c63c82f061c08aa368c17e1c7c13fed41b96ee5a9e87cd0066940 -size 451974 +oid sha256:a355b280e2ce99f301155f7dc4a0d01033baf35a746a8d157e4562c1ec2b74fb +size 915108 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png index 05353f71a6c7ae43d8c2d6c9dd8dec90c3561394..5c8b15ba46ec011babc01ba4cf00979e8d5bc760 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac1412bde5098533a9100ec9b56e2cac129d3c73c47adcf7becee7a13955cdb0 -size 880808 +oid sha256:a0a724396cdb972411edc75f1ec0160bf573e21c08cfd247ec3aec027dcabd35 +size 897191 diff --git 
a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png index cd63ddae0d53c8c32164058d1a39313e4b223df5..09b82a20cbfa69ab900072148a45f5a691adf8cf 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcc110d6a506339ed94003d1ffa28012e2e38dd39a092ef94d02202109cdc7ce -size 889396 +oid sha256:7077c18c01c5727e8bec9cfd50abe2c005607b80d79c4381350216572d647d50 +size 444956 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png index ace56affaf84817aab148ef114a01e5248272f5a..2b602be0032b0285ba6b6f65e23de1f088aca05a 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e73377122f1522c7fbcaffa1aa84ec13f2702a5e4c59bd4b86a66ab7d5439a72 -size 473896 +oid sha256:29f299d0145eb4fc87fabf6c2c09aa616283e47d88a4a29803ebb2f00215ecde +size 285963 diff --git a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png index 04e0bb4214114535eaca5f1d931eaafa0591bb9c..62864321ce5c72ee22fc37bed36ea2044a8e6f0a 100644 --- a/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png +++ b/images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:872625b1a731d61433b78fa0a663562922df5c940a8ddb0294f47d4789f4a3ac -size 1177987 +oid sha256:ddef3c784abc19a3c598679252cf22e75f6a0a96098b233070144233673768fa +size 848316 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png index 3cdc7c5c5e3e13d88de8497a12857513d30b4e29..41d00012fc907d4801087ef6d274d58d3770662c 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d16b3d344986c3adfb757e7f5099d9f0803548ea46d14f003bbbe02a0eb58b29 -size 511088 +oid sha256:d928c294774f15e524ee6ff34561be69ec23dccdbaad57770233a41b338d1866 +size 544054 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png index 82530e15ad6b62499bd39fcd80c2e43ae3138a1f..335597191ad6b50c9604dc3acca754d8e6f82d7f 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a99ac49cc5530297751d11dae75c39ec97c03f2fd839f1a6bc31c4d36226b24d -size 1546852 +oid sha256:111bde4cfed81701d2301e17b4a209809e8a87745922042118f7081c7115d3c3 +size 1550452 diff --git 
a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png index e3062b2bb6c050f0fbca31711b61da6cd40acbb3..1664ea08f942b18d4f640b9ac299aa7ff8c5e5ea 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fbe3f01b4a87ab5135ae64b640040975217370a45953babe00d628822b6672d -size 1454985 +oid sha256:c059dcd69f417c91df51eb97b3eeb5b0546b6a5ed6fe3c587b5ff3e8f262e26f +size 1454179 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png index c54e4cf5a78659e0c99ce5f5da9ba4221fcc5d7c..0110628e59be7a21bc1b32a9db29fd6ea7fe3fa0 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfa5005ba40255022bab45b9843ce929bee86e24b8ba23a320da9de13500645d -size 1475589 +oid sha256:f14c46332a240d7c8b1c105969ed92dabdef10579c90913b51c198712842ce8d +size 1430135 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png index 36c1200bddbad3824bde68d59303b8482774032d..3e9dea606d97d3f782ef2c84b14ad6e3586d8642 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57bc3d97954f98971be9d600ef58d41786e5a50aebe4ada29ca229a9cfd029e2 -size 1467288 +oid sha256:28a174f201f9f0684a8fb9f7eef464bbdc3bca709302d2ff94bd30b2f8d42af3 +size 1364179 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png index 6382edb97608df08b794f36a6a60dc48b2f252eb..66ab62fbecd2575b49b244a3eaa869b72e87345a 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04bab441a002a73aa14c3c85bde3afd4b1108be9f0561b036e190368c6da1245 -size 1477215 +oid sha256:d7df6484bc0db8d3b53ae085a83f821fb239608b871410bbdbc14420f74ffb30 +size 1477435 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png index 9886cc6b43e41bedd285d6c2cfefca805e6df45c..b874b6e22f7cd8b431539752886fd5feac1786b7 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28ad1c3c84a11ad5fbd3d4604a83b881c0d34171982cd24e7c0923232fe5ffa7 -size 1397961 +oid sha256:acb3bd9ff90db8da43f8535c1c2a2fcfab40291e6f99722c70a96a5e20e8c417 +size 1482606 diff --git 
a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png index a5dd0a46e4742f1e997471194c82a4d00357abbd..cc383f4a48d6e5d1156139ff7ba6904f4aa32c22 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43f4323e5e1bebc2d5fa83465c24944c05fd498ff350b1356ad789c08cd80070 -size 1475897 +oid sha256:56c457b1620ad3b948ce706ffe596243b7a7a77fe64d807456b1a16eaf5f127a +size 1520197 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png index a110fea652700aa6ef2e3ebdf2fc5671816cfade..8429fd63baa3f755e5df1f5964af85fdd6647066 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3c1f5208dbc58dcc7b5b112ee6c9114243963652b3c4c4db034a09db617b467 -size 249811 +oid sha256:21908c665acea08a93974292788400ce2c97db280f183d1c4df97b5b89d1ed56 +size 251167 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png index de15e29aa65567275b75c7f05616514da163fd24..7c5033f0b84578d7627a9efd6b259978bc04736e 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:166a207b590a96a384851a200aa1218315c3fc854fea69827352a0b9d7f006f8 -size 1414776 +oid sha256:267d67b71e544a5ebb5a1511fc4c32e920f716bc247a93337daf5a03e6a4d161 +size 1507866 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png index b5a2e8382087618328c75693a99212bf95900d3e..65fc18721a19e0f23e1355c37d5a20705927d929 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37fff3a97e8e51c001a32f9b830f27565ca664ba361b05a4cf60817fe4acecbe -size 249880 +oid sha256:3492272a6058f6c642419a88f5898f6f9f8ad30c10c4ae852cf39d0d327ef602 +size 270357 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png index f3990985eb385deb5185cda5776e785ee6601e66..101199ceea2623e0aa55f97e03fec51aa32eaa63 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33c64c41ea4e915e9ddabfedb28d1ce2e8dab9e105f4bc82d22c7e93e503e3d3 -size 1469205 +oid sha256:360d8d078eb1dc99e4d1ae75b8b8de9962d5762f99ac7ebe3682856423836a08 +size 1203726 diff --git 
a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png index 7a506911ffc77b806968501d718c11e0934587b3..f8799cff503b2d22bf567de786f19c27e19d4270 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0750c2fafa994f0ac3143e9c3e1012911ae68428bf488d637793eca681c9af14 -size 1449927 +oid sha256:e7def183d24355838529a2060a51bf88e97444c20268b9225501487d2db48ea0 +size 1359635 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png index c51883d07c08d7f8b23061df084976f037b182c0..49c8a3a4780714c812eaa9374fd1cd0773bacb9e 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:961c198490487eb83214a73f43a399c8900212b88674f9697f24f8c6ad88dbeb -size 1475673 +oid sha256:a7cf508c67e3e5d312971de47988cbc001fffa5f4a00ebbb42dd07d4584cee7f +size 1572035 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png index 5841da9461ed259a9a898c95f1f388a55ce4ad55..0e23df6a944ab1cf6398b5bb91af0537a6242e9b 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50e15a628d6d651d1a2f035fa8f99ac10d918c088729ef694a38fcbbe3cbe73d -size 1475291 +oid sha256:aa4f18b8df391d20831fb4f319943359d8dc10949b3060f1fd87b95c7914d255 +size 1566066 diff --git a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png index e6ac6c7506cfdd35b56db5181716ef6a23bb1f4d..860bd814a3f7cca0389c10827b04e22399bda7ac 100644 --- a/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png +++ b/images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74a430f0d3d7ed163430bddb5d993dfaf9d0987accbd2a43e8785e93f8b4a3d5 -size 258522 +oid sha256:509a8413190b2e899ecc7cac2d824d3419d37a2c344387a3da411dce25e3e836 +size 332204 diff --git a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png index 927bbb104368f9baf960f681d29ee23d5c2b3199..068151b8247176dd3b8e18f82ba71c0623fd5575 100644 --- a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png +++ b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:964401fb8c905a0712fc265e9f3f5db15a222e73eede33381b1ae935f225b02d -size 1343415 +oid sha256:eb7f6d4dc30d5a8a61b1ac76b45d6488b54beb47369d962903151d47363c2bf8 +size 742807 diff --git 
a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png index 392a35aa14c6915570e6057bed513e06d3b3dd7b..03e1fd3d4997754ebe6770632160c917526e395a 100644 --- a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png +++ b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10ef5ac049a972c7fdbedf9bc5f405ede62afa5dad72c4e12cc1ea43c9773f8a -size 985346 +oid sha256:7c3b28c5df28d6e113fe063a300cf9938405d825610e5fd32a3c1f5183c0fee9 +size 1184215 diff --git a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png index 60097ddc98afd0ee4fe455ac4c17d9b92cfb7e3d..d73d77febd5350208b61ebff34320aea74a1b8f4 100644 --- a/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png +++ b/images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1455c5f9dff383cf57a5ae3e69fefce80fbc81b89fd450e6d6390854a9b4471 -size 862849 +oid sha256:ff326f0d526a909a7dbf3b9764768e0440a38a4fb2ab7f38e880652ed6bc11e7 +size 560802 diff --git a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png index 68467f19ef8fc2dd75e03f6a47ffde31428538de..bc087350c874f33907fa2b23415e9ab84928f327 100644 --- a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png +++ b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:313b95f4f809a064db9a0c4346d5da6832ea5e659fff5b15666887381f919538 -size 2270247 +oid sha256:2eb55b2f3abbcd7f431953bfc7b49664c34a2505c5fe70e0fc79e68e172e2f04 +size 2805084 diff --git a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png index 6265638177fc59736ff8d18d2074d586dd88fb4f..f0d2cfb69c8945175f6c70397fa21231bbf60405 100644 --- a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png +++ b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:414f82c56d8132cb6891cac98b3d0bd2288a6da5c87addd2d46874fff08b66c2 -size 2249857 +oid sha256:c5931543adfe158a0c4546644aed2560ce26a64ee62d2f07e4017f373efd0ac4 +size 728800 diff --git a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png index 778dc287084f70ee6a82ea3811aebe02ca46db80..d7bd525b877e963a82d90d6f3bb519c87eef9ca7 100644 --- a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png +++ b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffa56cadf84e41c0d0eb9c62c28fa4fb18eae729ce539655fa597051daa6b7be -size 2183300 +oid sha256:c8060320fd7a5cbff5510dcbb42379f383163b819228b71b332e0e2efec2e388 +size 2139644 diff --git 
a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png index 63d7c66e3fdf12d4575a7f0179954f0e62db08d1..d87604431ebe40dc3db85864f19b5c30dfe3efda 100644 --- a/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png +++ b/images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba95fd806c88dee398117c9ae9a39cada0c0e0d14f936e3f9bee53c64ff72064 -size 3175360 +oid sha256:9d6ad6351dd36e40a6a7657b3c78f7863911965043785e6cc6a71ba3d581d0fd +size 878953 diff --git a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png index 09b5d5d069997a3e8389945620046a05550bcf8b..57aef8e9087339d0d566303912357cf4088f249c 100644 --- a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png +++ b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb36248af2baf6086322a37be6d6b19178f7a2ce3cd6af61ba7856cecb881e79 -size 1081417 +oid sha256:c728c046d05146904db981328a99d72b0c456592bbd3f1d2020ad7016c134306 +size 874663 diff --git a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png index 6aaee6ad71df0418f06a3f09c0d59a11834808b4..d4edf370a31cda51edd8d2e6cac65a9622020d52 100644 --- a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png +++ b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec44c078abf2cf90b5415b9041daa039601c4e0de4fa3fda8c120a2c36b97ff3 -size 2138054 +oid sha256:993f7d299c926989bf0f62022a9469512f223afe5ff84a83aa62dc9838df530d +size 957526 diff --git a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png index 83d7f5f72d9779cbb6e31d8c907d16092cfdc6d9..222779ea4ab582683d628b74f9b58b69609c6cbf 100644 --- a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png +++ b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ae9666f72650600f7143a63a8c4b4b76c60242efdd2291496a8cbf15629b610 -size 1204593 +oid sha256:9f430b54aaa7a58e5b2dfbbfde5929cf1474891f44cc2e54e718d728b3ddf1c9 +size 1348653 diff --git a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png index 5090878bebc4cf43831243e259663126c790dbcd..f05a3cbd859d318b4a8059451ef323b2bb5ac829 100644 --- a/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png +++ b/images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47046d742ba071ea1bcb6581a993c3d620043d321ef1b4707ab3a7ebbe1fd090 -size 937093 +oid sha256:46cd5e5433f5562e3faa06b06a8a35c27a9895694395a2ec9dc891cd4a0bf02d +size 937033 diff --git 
a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png index 33eeea90c3b99a0d662bfc793cf79c6936941993..2b7f01bf28e4732581d05b1fc4688e4f260e909c 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5da5ec7b47f16fed75ab46f2092c0add033543bf7f2db299ce5da47d40a17d5f -size 1023955 +oid sha256:0ad1778f7d29c2f69fde89fd850b02ca0be90dd1ddbc99178b677e4945deb996 +size 1008257 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png index e2186866e92544a2f40370f4ed55040fb0a6de3a..a49ebf8e5dc281637c3ceff09591b4cb57fe0bb7 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8dad9e0c159932cbaa4e8d5b179d6bbca6f03ad9b35128745cd35a2e6d7384e -size 691044 +oid sha256:139c76b0bffaf260d16767a6eed2ec71cd9f463b71b41bd5b7beb796708c1a46 +size 947564 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png index 2263a3b26499f4e62212fe6ffda142f958dc5666..a55a986eafe13ca16f44c25108b367ad4c438803 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bd2b1f1bfcb34c46e16eb2a207e49f0296c60da78ce7d7efdf4495f01108748 -size 214320 +oid sha256:d10221dcfa5b0db48a86299a610e6edb686bfb947a59ddd9146807e1390ab98d +size 859610 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png index d9abc45bb696dd78fdb338fdbff26b8e140e130c..e729ce509327e62fbe01cc2b2911a5e20ec7b3eb 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e08dddec4f8885175ec6fba28d1794474dc8e744130b68183cebc528174f03a6 -size 191352 +oid sha256:6d5bcbbd5d24f0f1d42c47e858ad3d23850060c1cdd2a2525705d45159a5a706 +size 160265 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png index 2cc1c7e23ff03e5625b8d9f10739d668acde8d6e..0edd2af40eee1a8fba6b83c2185d1fccb033ae16 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd2e402c7644bda049337ce7dd5c85042e30327d59df702923f63da9cf70a882 -size 297542 +oid sha256:62e15f1a8a5f7a6e394705516eb86a1ce21d43e289313397ab1b3b5f101fecc8 +size 321455 diff --git 
a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png index 4c0a9431f9e94559a9209d8887ec5dbf98543b19..8a71038bcf9f51d8827306146c7dd3c5e55aa417 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:967cdcb2e26a94168343864e5903018dc980142942144a0c078e640dd9834a90 -size 223176 +oid sha256:14f1cbe6f571807cf36c9ef9c6f25669e70ebaaedb99c1268c4c10f94cea1887 +size 225508 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png index d08df58bbdb5de1669b12164adf7e6bc4e2e6da1..5c91236fb2b7a51b5e718e60f55cc61cbdb638e7 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1373857b273399ccbec6af213a5e00bac5478c53df90989c822c1e55edcffbf -size 691697 +oid sha256:ada1cf01f4ee528754a40776b2065b3d57aa6cdf9e2f0ad5c30586a2638ed140 +size 779066 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png index 76a2aeb7e3f270bae5bd93281e780658b4293b9e..4967d2fce5e16ea42157ee3460034125604be784 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa358a0ec52f8d5f824d00c7672fbac0ec8bc3404e92eabbed73dd90d568ae47 -size 181170 +oid sha256:e4e02a5299f328c1cd4c5a6d9753ffab2f0752044bd87f561046c127849f2f1b +size 182014 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png index fe556b924f396cc368bb0a1ec517fd96297fb692..adbd81c45f0fb5a743535270f3541dc7f910a3d0 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccb1139f96699187b5d7d965dc2471b165a2d935af093498afcdfbb4cb486937 -size 266859 +oid sha256:93e59c3b533f745b0f5282d0a6710d4a845f87454ada9a962ff58141f7ea831b +size 268878 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png index d984a7a68565f07cb916fc00fc94f0959d36dcae..3ba7a5c129977526559b104833265cb515a7490f 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e4add5e26b57a3ab723824a9f14806feaac4d479b9d237eb08ae5a509d3f1a3 -size 180710 +oid sha256:6972e5cf0fafb4f9cd5b62261d5d302dfa7fdd705e1e20a030bb87cb4ed95631 +size 192582 diff --git 
a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png index 02ea436908df0f0b7a59eeb457d57146bfc746f6..b4044b924f68ae5bdf82806d7d884fa632cdfca9 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79453c08c20d3dadc6a53dfb83b4711ddd23df22171b3c09586551fbe78ddbb8 -size 292651 +oid sha256:f7a60a43d21fc28e68b8083e8bab552244d339f7730f7d9cfc45a0d11a6e2784 +size 294331 diff --git a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png index a4395fdfdfe3409efa3c1aad5196ed04a422f9ba..801f7b6ea6af87ca5887e9e80feca1326bc8c1b5 100644 --- a/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png +++ b/images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb4864f12e06ef91d9926962b8056f84602663393861be6fdc4d775035b1d76b -size 720524 +oid sha256:d55f5d5a29165cfb1c8fc748b82fe5d823ce5c27d0b09cc596a7ed4ea574b28b +size 796168 diff --git a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png index cb009f0bbdf604da8163ba2f5ca0a2efa7455618..c0cc8da57f4a86aba0d5e356be35e8d938274878 100644 --- a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png +++ b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9699ebf6155695b176486b558d4a61d7da3faecca9961b7a6b6f87e2b7efb3f9 -size 1355634 +oid sha256:1a2a1055949ffcb31e6559941b20c38ea1cd4c8bf4c749d2b708073ff3d91583 +size 737457 diff --git a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png index 00c7a215fed6b62c6635cf5bcad05fcb64af886b..2c788aeaaace00f7d7a62125ab85c3e42c0fdb12 100644 --- a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png +++ b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53c45569c7e1c198ed89b9a93f676af1134d7b9815d3152e07c40fe3fafb5070 -size 1173217 +oid sha256:dee2f3eb11e17d9873466d0a2f5de6d10cee12b50562b0dbc012087bd5722122 +size 1388593 diff --git a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png index ec40fcba750d9aae688317825c0fabc0c5b1d65e..c1727051544999e5abdf84a76c2a991c6ff50377 100644 --- a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png +++ b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16a5e92660ee8e0f34d48086f7d90af20aaaf956c176e50f5f7a94fefd8e2e39 -size 655411 +oid sha256:efc157141aae58921fe6e559c93be94576c3e986fc315f41dad49ee244902220 +size 634990 diff --git 
a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png index 171a882cabfeea6a2a370b867ad1a3ec28987fb7..c569d02b7aea9b9938dd514e31468399b372bfb3 100644 --- a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png +++ b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04b216f2ab8d3f727a3956beaf5c0dcadef05846b5af2e04d74b2c372c201abb -size 303790 +oid sha256:5df386f6487f9ea5d730b0088852edb119df7ae66aaf9128055717a6290d5fae +size 362172 diff --git a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png index ce64adbaaec33a1acedf1d24f59b999be6411426..e3cace89dba584fb7ad698a17df6b65bffb2021a 100644 --- a/images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png +++ b/images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f39d7e7296051d74a563389e4988b933a120f31a08837da6e759657bae3cef3b -size 305109 +oid sha256:66167a7665fd356b9801308c1394f6166786cf5a0a95a9a0eb3dcbd1b95ba2a9 +size 380166 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png b/images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png index 2a049753c5de08e7d7c38bf8944d7e347f982a87..1a520175a8520d1e2717a45006429d4f420c0d96 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46ad8de724e827ddc1c5b9e843bdc55df45781b974b2406c27f58e8938cd13da -size 659117 +oid sha256:ebe117bbeaa797c5f1222b9620ee5d8b315165547bb6153fb9727a3eba8493a1 +size 582216 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png b/images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png index ffa8e73529800919dd52b25798df10c1ab0f38a0..7bc65ab00cecc658059a1c6dd1246c82d7f8237b 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:311b9cbf858d66869a09a4b36ac8cbe03e0425e9c7a692c8e4fccd0907e591f7 -size 642475 +oid sha256:f01ce6dd837123bca7f8888b417776b9730560b53e0a74dafe618a53a7d72790 +size 605610 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png b/images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png index 110f3acbadddc399f4ddfa14d570a5376fd8ce1d..0977a5697cbf72177fd3793aa684db881ad2493b 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05680901194bf9b56f65782072ed2562fb372bb21919c0d5c028959aa8ab085b -size 1282079 +oid sha256:042bff231943eb6f2521512b861922256aa64c63e2db175a934653435dc794d4 +size 1733014 diff --git 
a/images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png b/images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png index 70ac2a71c65ce51da0ac0b4705c633ce5339a47a..1b0458991ff7131b718fcdb55697c3d88d14cc7f 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01128d581bb3d0c42e0f7e488b9bdf02ecdeefce2e8cf4c71a55f094d8f19f5e -size 1071710 +oid sha256:d7c465bf18f11e17b5faa33b10170ca083d32e5676fd2c00c61f346846cd2dd7 +size 1236619 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png b/images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png index 6ad628e87748fcab8d7136a49a4a57518276e2c4..0559d1c420c1e64c2d29bbe65f341d3e24affbd4 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63f78a74ff2b2900a0c3a9e12aa9657aced64a614f89599bbc5aa25135b10ad7 -size 642962 +oid sha256:385be25ac121f62158b98112c9be88fa0f9c250d6724e360bb1dc158c423d8d0 +size 785937 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png b/images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png index 02831c85fc27b9a511a0acac6f87f84435e84c1d..7cc5f3965acd6083811ab6fea171fad9c08e1875 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be185f076a7b6eb349b29fe5dfd1a868f9f54926ab0710b49842d06c9515d610 -size 983526 +oid sha256:1f55c95241fcc400d4aa0ad1752575289f4b9f1492110ad4656ec17e208c45b5 +size 858030 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png b/images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png index 4ff87b2f755baa5b0808d2595d5c158c6e9a85d1..4e20841ab80109c848db56762ca6eb1483807dcc 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:111d3e8c04af1a407316af6a978d5fb4f8b230fad10e42c12ac47de318efbf15 -size 642197 +oid sha256:8037991d88e2972d0243400c3436954c2cd43c38c1601d78c12e16fa301ed18b +size 996092 diff --git a/images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png b/images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png index 2c10fffa7a1c5cc584f1df424b75a1bdc3745d14..82f1b76f4db5f6f441aed48377f9bbe539045aeb 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8544e8351fc6e266dab56f2d43d156338441d8aa71545accd58d9f66f3449233 -size 643915 +oid sha256:74ecaf143b6eb88020fa86229fd994d1da9458873edaff33cefea4215cb13ee3 +size 905859 diff --git 
a/images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png b/images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png index fec37c238038da4065247355fe03322881794ca0..2bd3e00721321887b53e14d434abe5424169a5a2 100644 --- a/images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png +++ b/images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48dbe36a69217b1e8268f344b5b93a7ea2d10a9fee0d964b1b1f2c27542cccc3 -size 642118 +oid sha256:17e0e289fe2d985699787a57cc64006d1e0b8ceb69d31d3b087c0dd3a430d317 +size 952030 diff --git a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png index 38a080f81291794124fc8c889110c0923742388b..5a4972bf1790ec7e8e69ab338e65bf2a636b132c 100644 --- a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png +++ b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:268613e191c25d6b17ec403fcec927179ef5130df3d26e7944014dcc7dbdc225 -size 1750885 +oid sha256:3a77c7c2190d48d2ca1df585387f76057543626a312d64eab0c5107fb0daf1df +size 1509764 diff --git a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png index 792847a1eb944944e4151a62cd804075c36acee1..ee0d32d465c7cf81654a32c7b2d2cb58d0a1e49a 100644 --- a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png +++ b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f39954970ae7858209e1acb7a52e9220aee3b871136c31eead2b229535c2717e -size 1936171 +oid sha256:3450a0cdfb4a2913662c9f61d90a2e80f6ed85dcd7b4efe4be551070dce1aaa1 +size 1038593 diff --git a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png index e7b81e10256df22de01a0d37ddde98449e7408d4..5d23c18b2e573a1d80e85d43f05c93a7056420f8 100644 --- a/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png +++ b/images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf0ade39ddbb0ae45f4b9e1a2f62137f46f2d0b692c07e68a8eb06651effe120 -size 1795838 +oid sha256:bd4dec108c6bfcbfde1e1bc6b79642fc38d49f25948c943a38e95122f9d60c02 +size 1146257 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png index 31c92ca8f0a1471398a3e51e8930fafa138e28a1..c64731ca74735ef76acdc03b892bf75b73b415c5 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:490beffdb0c94f6bd011433afc04791d308c28922af3e1bf41e289fe039da44e -size 1159460 +oid sha256:17960f62f136f06fd53643dc3a95a4a5cd5f001b4803650bc037b069dfd769d4 +size 1324708 diff --git 
a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png index 5bf5d720b9a91a3a058a1ccd9eaab0235ad8f9d4..f028e0380d79ebc040eb2c16176ede4d1b941b76 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a709f6d62a803b23a181b82308440890f78c7bff716202bac21229ab0254ef91 -size 680904 +oid sha256:f3c8df1f1b9a6bb7139a726aa5672288ec7f15814ddf590c46378a255664e3fc +size 653861 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png index 3c4f071301c7f65318b778cf991637f27f11cde8..3c7a3880ea5965b10d7dd305b8baeab0878fad49 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e7fd0d1db4ccf1b7e22ece79b19f128516f307ed833f56d59aafcce43dff795 -size 2720395 +oid sha256:3eece6f20b56b6209bd3931c3beac8d2b77d573eecc68de8ad907adb3766a25f +size 730209 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png index a6c4335ddc72b96703bef71b3ac390114f9fe711..2e0c90be50ab7160632962c14a4d91b2fe20c233 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b64c3b8142f8c5a828e31b525358fddde90021e859e814cd8d576ef429638685 -size 2548798 +oid sha256:337b5638898abc63067797dd8c4730a2bb2a22b7d12716baac6e87c0aa46c882 +size 421306 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png index aec0dbecb852876e2e9ab5f48d668c2e015de8bf..a8dc4a2d0702256a6cd90db15cb8b409cca2633b 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92dc559745bd3110c96c51e6286743530a199656ec0b2c83599e076da76dfc1e -size 2761582 +oid sha256:f55937ebc94e95a1f040bbb37613c89320cddfad6a1f3adec5ed45ca1de75a2b +size 1218711 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png index 1db7132f227f5a55a771622f6e74136a9858c00e..5566c5a688a2c4f9ef3badfaeea2beec47d3789f 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:036aad5e6b3556eb54a55f1042a9df8104806c83476c3f7c316b716a91726957 -size 672117 +oid sha256:4d12d3a2333922cb216d640dfabffe6a2720cf76026f224871600f0e9336d9a1 +size 288690 diff --git 
a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png index 19fb560dbde6f2b5e31b7e22457b6b64f8afd74e..017c7094f83ce12bcc750275bf02b60b626e030a 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b6934eb289c6af17c2f67016807a65b7711f1d00a0e8c63fae551b30b44d666 -size 986011 +oid sha256:be8a8e42a8639efe190702b1a8e2ce3b7d6c458ead52ba8677e4fd75d48d43af +size 633605 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png index a7ebc21f9b7dd39bb57793cde58cea4baa0af0d6..e4e3828db2e43b0296ef227597f7da509ebde4e2 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56d2e45ff5c23b22e482ac7825af8976e65e5bbe329201a9ba2f6e5f70ca5e97 -size 818906 +oid sha256:4b615627b648282916d6265c19737da1363734dbcb6c2777ac176115ffefced8 +size 768022 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png index 725de795eaca25049a079e11295d6c66f25ef369..174a37c5e07e74550e1e915a45193b33aaf5fc20 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab010b1543963468372368fa373b9e0ce3e743f08e64ea139ab2d18e94d1dba5 -size 1088763 +oid sha256:ee6ff17bb5cc207a191dadea7a33003f2833a0b2662666217ac801b1ed36911c +size 1685302 diff --git a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png index 4f580c047f7efe14cb15cfa06ed505b7346d4241..6994dcdcc59e4ffbdeb033bfd2753e1fa47521c2 100644 --- a/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png +++ b/images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5055ccdab3c8f16164ba66da138ad1d3ebbe7e2d5fbd1d5b65192a0b40daa29 -size 1259993 +oid sha256:953095a1a04265c61018e6d9993886467b713c95ead1aa2760a37d174d961ec4 +size 812265 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png index fb8785c6d15e847ffb051de0aef4e8f4a3bb5e41..5cc8f7e31946d38a5808459047ecf1e6cf895c16 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8665ea5f4a153914786c2c3ccc7ebbc74b8f8275b7d4e4bc1b79ad283f9db25c -size 1031717 +oid sha256:fc247c839a739e40950d9e4946a8b1538706a4b072977aa702986d71450d9b53 +size 1332641 diff --git 
a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png index dc3a43582de5d1ed70075e62a6f82bce71af0997..08dc3af40918a91e9eb11d22b1b83055efb21005 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb48610d7a14e0fee93c4acfd7f3b58f2d4b4bde872f3c5e27a6c750c2efa570 -size 1046344 +oid sha256:c5b67243d0fcb6a5f797df710fdf6e2b50977708f7bd7d9d631fcc940c3a8c34 +size 873818 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png index e20e2bceef173a8a0f90ca31c0bb0cbcbd8633ef..1cbe120062a2398afeb03f7900b0c86e64bc1284 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d72dfe8f967a154f01a1a7a2b2602a9db7e4ab4634b3ffe02984591fc4056932 -size 1047015 +oid sha256:a31b11d50501787472636ef2dd4e220b32e10401bcb14daec15eb7300b5803ae +size 1347686 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png index d46191a36704f6d9f82c5ec8f2dc7c9470279abf..9a8f6d65f61922b3a3cd22cec554321a6c1d544f 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd7a0027b9c8f588e6758fb171e35afd410a1ccdf806398b2a11de144f63346d -size 1017501 +oid sha256:4a69651a3c4195ee2d6feeeda02c020ecda7193927bf3cec65f19a131d8f680e +size 1310369 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png index 9700e570131a0b20e24ba4a9b1cfddab4f518483..85485f7cdb14554f10a89b459e4ba33d1ac64cd1 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67f2305dd27b2391dc4d694d86cabc134a141a112515f330ec76cb7b766eb72b -size 1052013 +oid sha256:6a9314b7636a09f5c62d572253d7e497bed98986ec1e5255070e0b5118dcbb60 +size 1317366 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png index 45e41a15ca5c515271c161963c032aae448b923f..2566343a7045da1241f2a252ff6e47e0b0cf8b42 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5e79e8add68256eb6d05b245255a64d6be692914099ba36b730abb7bfbb17a0 -size 867719 +oid sha256:3259750ef05fd71d7b8f89c3e97c50f8e0cec4d6a0cb810b3c023baa28bb2c8c +size 1020571 diff --git 
a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png index 622e59367e72dcf08c05a2f168cb4be865bca627..5e0f09980de598e4bd6e4623e0b7e6dfa8e8b695 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3609adf92f08cf044b1798ac7dac503345e179989239471592713d90293baad1 -size 1060395 +oid sha256:5846cc34bb38f28f73be89fa34038bf69d184b3a7324807e6db2e2323b88f26d +size 1126056 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png index 22dbea9f4a7179bff5543505cf9fbec384422504..0d59f7c49d5afda24ada028cabf16637edc6e6d7 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb95b97d65fafcb524670faa6c1a9c14ebb2a3537f2022e97402df76f004ff1c -size 1053797 +oid sha256:ce4dd69532a1aa256cdb35dc200edfe7963e00aca20b1f1a3321b634f3f30c44 +size 1164180 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png index 9a744ab5f0696b5937ddd6f20f8047822231b0e2..1ef7cf35c403b9995ad6125d84f9214595ecfc42 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cac4512dac2c46a7942e2a89d97a1b0f8b23d2b6e0fa805d0b5d85a6d5f28214 -size 1046539 +oid sha256:f988655c2d58060a997bb732ffde20433a92165f8f3049f12c6025d0a76f5b86 +size 1348292 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png index bba5f7c9e595abb700b8b4fa62dbdfbbfdc3862d..b1a62c12bf1e0bf9cc83951d1690ab922a1a0da8 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb7d786a10d8cea32b04286ec68f9e040b860be3818f8247472b08aa6a4e0f6e -size 1039535 +oid sha256:a0e293efffd2d261e7082d9efbb22da6a9a5c2b537e20953b412293019372bc8 +size 1342518 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png index 5d96e19f7b0f8b820b0fa36f8959abcc1c3d0337..39a0ddf069d422713d6ebd6e0aa04648ad124e08 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a44b789152debc60ec153b242ccc4d5d96bbd614592f5325c28c78b822dbafb -size 1099800 +oid sha256:3e46b40fd5bb2f407d0c1bb821ef74b6e1657d36317a6342d1e0e93c65627bdb +size 1163475 diff --git 
a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png index a9ac06e41748fca188f3645c3b7429a8b17aa229..b9776c5bba041c30ed7ccd5d47ed7cbfe88479a7 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c0cb3f26d0af1475d8e49c523c9d10ff34c264dd1a04e9ec7533c06a61a3d50 -size 1055596 +oid sha256:a1286894c8ec0050d0ed2bdeb890ba9d5b25652b3323c2e72162a4352d568aa8 +size 1091109 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png index bce86ef2766755f2f7224a642fd591ef41477c70..bb3b8d437e8f049f56e1a40d4817590c4d72c475 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a87f17ecba436ee3d5ce61b18503780f2a5a8549b98ae2fc53b7822140eff95 -size 872440 +oid sha256:c23ea2adfc76928f0024ce059968cbf25d1b72a994b251b3afdda28a45eeabc9 +size 1059724 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png index 73c56ffb71ffc1b4ab77ef85ae87fd193804a97e..939b6a01bd41d8b7275684a8406a5a939164a33c 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e6d6c5f433e79ada0ea265f005a4fdcaa0f8311aa809d10f96ea9308613486d -size 1049402 +oid sha256:4e39f51a0eb50267e90a5e54ccbdb373ff15185ae8d12e4b187f9fd47fac8ef9 +size 1305155 diff --git a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png index 1f8e4b50212ed7de81cf8ddcfa7b3bc01abd6b44..87931edb85401c9c60a518bec2ad5144592c7744 100644 --- a/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png +++ b/images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a70e2c9f9416d8025954ce75b9c3273fe479621d4c3bce879af8c724359fc635 -size 1022914 +oid sha256:0612374cede0213c1b1ce97ac69bdac71a3a85427c591b98866d4ce563548c25 +size 1287972 diff --git a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png index a80a8e7da42783c880eb0d1f5859a62067592a9c..d359d9ee9e8d58dc59b3ac8cd53b4cb72f6742c1 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0ea3f9de6e625c6b2c4dc049991c81781a92177432f15a82e752a7fe8cf8a25 -size 4283296 +oid sha256:d64ba2a52371af64949d3cab76f87cbbde7fee8bef43512fd67dda6e97fde36c +size 2242785 diff --git 
a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png index 4c05085b74dbc71d5d69e93eced51202f79c4723..66120509dcc165c406fc80dca00570f63596978f 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06112a73503ada8e9fe11077f559666711050c25c50b2c1ed93b2d5ee3b7e595 -size 1329962 +oid sha256:b2520fc271a56aef3d8b1f196363afa741b62aad772fcf669d8502cd67e5c59d +size 1492896 diff --git a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png index 9441234acd6c2029b07f98e4910e896aa5e95c14..a1f3e1ee36270b18e78bc98f73ae2df2b1df72d5 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30b51ba21cafc67dacbb8c6a27c8ec62d6cd457e65a4542270210164bebad164 -size 3627596 +oid sha256:a43a92478a3779007e87369758aabbb4730b46d4d3be521500c1a32e8fb8e18e +size 2056144 diff --git a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png index 05b250f1a9ff693c29e82f8d5cee2febfd173b3d..6484337e71f0dcb19e549cc7ea0add3249ac7a3e 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8f406c17ea25a987973533e000beac708ea1f9b924b48584f26559d5047d719 -size 1303769 +oid sha256:fc171124960a32d337b634794048f95b1882a0c56f12667ffe66882668dc276e +size 1417442 diff --git a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png index abe75d142e91b9f23e43a033823b5ee1bb4c856c..524eaf979abfcd96b37dfc1e8b5c5548841b26f9 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43dda16fccf37fc7e27089a13cb2d86501ef5358d0ed77c6f906035ab1441901 -size 1184011 +oid sha256:2b9c3df290aa1fde15475fef5acba1e2e01fc63419866c64bf3e27b1a21c0518 +size 1182161 diff --git a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png index 1f86d35714d9581255aff65720bc2599f2e19572..0c15b7b6d66de643149ae4adcdd1f1b3307cfe0f 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58f72b6552089d6085e06ea0341a7390e37670f4ef25978b8c3a1229dd93a77e -size 1033018 +oid sha256:bfe76c4b9ef205a8e51fd8759a2933e597973d7c2ce485e3de3ce6924afd8040 +size 1026610 diff --git 
a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png index 90cc662efcb207e7ab69007b884b6d8533b14c46..e8900acda07d1d51742d02057333b77cba62a2b1 100644 --- a/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png +++ b/images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ecb75f563ee62c8f67672003cf73a757f5581851c98b3ca9734eb2bfae6aff2 -size 735262 +oid sha256:4d1065715ade020e2ff1f0bf75696eb76f4e8648e305b3c5a7966b80fbe90713 +size 761974 diff --git a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png index 86178d5da96f3533ad848a54cc88bbf04b3e1ba0..926b50363b6905732154bd2428a1c1c60bc4bf14 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d186431c5a95b8683ff57201099ed92c16a23a6e3a1c1c2cb741c9837da80b3d -size 1387073 +oid sha256:3904011396e5851574bcca0de02450778cc693e818e6cac08181bcc373957e15 +size 1134301 diff --git a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png index 41ac3bb201703197dba19ef6df48780ad235d1fe..3d7a6acdd5de2e7a27f41e2a69b7c28763c2b587 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb74959cb2158cdbe6fc007d45c5268e91882fee373d2f530761de9bed9faf00 -size 1412130 +oid sha256:549fb518ad1fac87902065b1cc4af3e882debecee535ed3fae331122064abcc3 +size 1207612 diff --git a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png index 6800bcbb728cba2709e637a70d7839bd0522730b..330f19db2984ba7da39bbebe5c4ca1d02d0b48e2 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08568088a6d99c1aeb6ca05a309dd2725f8c220443cfa6f965a5cb15d4c898aa -size 1029455 +oid sha256:f8d33cd6cb5c1a4250ba48882862d29ff5aefec825ac939674865560f6f58b49 +size 810769 diff --git a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png index 905c51c1ded4ed9245266fef73e6bc357319de90..e9c18529be45ab700913f5b445a39dff423545c6 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc26eff633d2531a72e3cad6d1e3dfc58527c4ea4128967288815c25b2f239ab -size 1120753 +oid sha256:e4bd31e631039b78beb4ab87eb5378bd3472290331e03e0a9fffe1609ed7e220 +size 1567270 diff --git 
a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png index ce8c530a201a91f375643e5fcf7847030347f21e..aee28ef1f995533fc6709d7b55cfa8d6adaee5bd 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c42707ca354c1896dc646c853385e0ee8678331deccec90f733f623bf76330d -size 1230254 +oid sha256:41247f6587b02b6afe248fd01bf746b93392785d388bc0e835e49ee33af6c2fb +size 1863830 diff --git a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png index 1bc5390945031771d2bf284b3228cc9b469976e0..d981d17bee38ebf8907b28e6c845251d6ece4805 100644 --- a/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png +++ b/images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b62bf5b4b1838d75a61e792e51a50da256de2d46c742845f78b7de3e1471df -size 2750237 +oid sha256:5edb62f5ff2d5772463dea5875087ba74c5e15f16fde914606c68fabbd590cae +size 2071387 diff --git a/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png b/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png index e7a78aaa9c96f7fd5b80a8a71bff46f846b65cd3..3d881d234452841524740da12f8d0b7d718ac702 100644 --- a/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png +++ b/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a4175c5bfa7693ee37eb1fdc641fd93aa31938f71160034fdba53c08d426ad2 -size 615674 +oid sha256:cbb3c8695f976e0b1257f7c62f648e3c2a63bb0b92ce93d6caac14c75b1c1783 +size 491274 diff --git a/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png b/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png index 6fa06f49762497fd50c1e8532e25efdca26de9b6..926394993c957c53f24f11d6d9ff2367cdd33b6c 100644 --- a/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png +++ b/images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08791f79db3a73f43f95a8c8510d8ce8ac7dd4783c0dd91f12150d9066e0972f -size 1881848 +oid sha256:9a5cc49b9921c668ff04e64c3c2d31fee50f3ee205659bc918e84d144a453155 +size 335293 diff --git a/images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png b/images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png index 5d8613b88bc9c2cf7f71eb62abfec6166fa6f56c..ca7f24def8c32259908dc003eed920ae83906773 100644 --- a/images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png +++ b/images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82bda6131978971a35cda7df8864a0895c012a4fecbd8b90396fb117b8ecbbc4 -size 2234778 +oid sha256:eed69b144b91c33158c06e892d667c8ca052c811451c780c238ad6021ce5ce26 +size 1472032 diff --git 
a/images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png b/images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png index c1ceeb6d3fef72b5bd538a4f3a3c028f272d2493..8b609170ecff0f9134467b8eaeba0574f79b4a1b 100644 --- a/images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png +++ b/images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7edc59e0f8618ce3a893893174eaa97195f4a646d5880c7fd0bb2e3bd8b61cf -size 1705740 +oid sha256:eb331c89ddf6b92516434de5e4c7657814f29d06815a17aceb8abcb7a8a774ca +size 1345648 diff --git a/images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png b/images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png index d6f168c9f60e2337d9a0456c76386475f84fa9d0..28af086d61a8a0434b7ff598e888be096481960c 100644 --- a/images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png +++ b/images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:383ef4f150d2d53609897c83e8b14d5766060bed58667c4e6884d3ec90ed62b1 -size 1655989 +oid sha256:e610f1ac3d10fc80eee4877284471ae609e78f007912a41c1f4f7ec8bd32dac2 +size 823255 diff --git a/images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png b/images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png index 59639b9e1aa619d750c4577435450f85ffd0a739..b394802052af19f7b7e2ab2b8132962dd70d33e1 100644 --- a/images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png +++ b/images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:299b434f655f87dd71f49512651531af78e9c39edf18a9403da96d03e0d5a2ba -size 1025341 +oid sha256:3d6689079cba00b1f9864d62664b866571c5ddc1ec5f0d13793f0c858e258039 +size 1934030 diff --git a/images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png b/images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png index 39f208e6cfaf1ba95ea6eee558bcaace35737e67..37a4afb925d38f55c026d2c9237d04023118d75f 100644 --- a/images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png +++ b/images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7e70d6c08a7ac3abfcd043fa0bc3460cedb50e6ce129b2865e7af18845ea23e -size 2368466 +oid sha256:44fbed3e36006998ab904bf73fd5a5de01808c2dc22c46739d8cf5d796c7caaa +size 2227010 diff --git a/images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png b/images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png index 0408c1acd107bf70e3a35e501da3ce1f17de4c9b..f60def65f089ea3a4b1347c9950e24e0c71c9e05 100644 --- a/images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png +++ b/images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f6b98d7dee3846ca39449cdd2259d0b6716f9d3d07ac1f720e1cd1d0909234e -size 1016079 +oid sha256:b969d0863b9bb2015057bae75f2fc198c047ff098eab6b76123612b3b1a1f236 +size 1270874 diff --git 
a/images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png b/images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png index 25ba17515beaf35f7a990ab884b3273bd8cd0433..656f8f35fc3b755b588240c29d3529ee14cab070 100644 --- a/images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png +++ b/images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7afa2405e6afd3355caf152bfc3649390f466eae018374b2a97ed719b896a859 -size 817018 +oid sha256:1b603c00acc568aa6e95140487cf461d8cc1494d4137892fa57638ac7709400d +size 1245030 diff --git a/images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png b/images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png index 1be305b6a15a06e3ecbe55d4cf0619fe4fbce01f..a7d52b9c09bef34fd4dc2add3758e5bb547f7c46 100644 --- a/images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png +++ b/images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:592b475e72d2c523a048c548e96650396439ffe20ef1d4779e386521122a8903 -size 796797 +oid sha256:5482cf7a00c1e3a776c3579cec5e4c4e6dd6e957a7c4a236835fec4b0142244d +size 527211 diff --git a/images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png b/images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png index 43366d6e53df3c527fcd3a1ccd72f4202b376b0f..f4825612346f15a0591e9cd1b0ff83f831c4ee04 100644 --- a/images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png +++ b/images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5abf36e406bc400fcdd041a92ef7389e4299d138e0061aca36377f4c670f0657 -size 1177632 +oid sha256:71c5d6631e0d26ff0f6f974a5e8502a54b0f67e09fdff7dcf9b7efe0da32c1b1 +size 1177491 diff --git a/images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png b/images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png index 4e891cf8a434804d271827b8eb0c2700ed3d4620..c373883d48f5b4b109b9746dec51a499493ec527 100644 --- a/images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png +++ b/images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b6f424806805060aa33e13e09d68dd55387761497de99b95483495cf7784ce4 -size 1657763 +oid sha256:44d6ccad5679b865befdd38736e97973e15539dfd206b4faf6b129e69332d136 +size 2418750 diff --git a/images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png b/images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png index f6815256e809ea204b08b2c8963cea904cd1e634..f54d8273c5f3a373c0b6844d289d2f859707b521 100644 --- a/images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png +++ b/images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02979d67b9539bca0cf001d2f46bf2c96f2546d63f83fedd19e4f2a017f85ec9 -size 1419095 +oid sha256:459f2e3a8801f05af828de253738f527dc1aac1b3ff37f98d225de45374ba9d7 +size 1744772 diff --git 
a/images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png b/images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png index 76bbc8e6de42b02165ad47c80db031ff8360c342..4476a43ff3f7a5e43fe5843bb6c7b50f4bd8ae6e 100644 --- a/images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png +++ b/images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cf96f3f863644c721ff11a96d7c71c55f1b50ad24c5d35b0b5ca12ab12b2b71 -size 992963 +oid sha256:fbe27669086781d62e0646b0710b2c1c2e469613ea91b9a08c2371a435519fb4 +size 873880 diff --git a/images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png b/images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png index c4204ccf24e3e012cc3a96159af802bbdcce11f9..1f7199adee41206e86379d2cb706f5b3b9f7389b 100644 --- a/images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png +++ b/images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81c288058731a2c66f1d2eead66b59004a7bcfbb52f0b8c228ee6cf2cb894a54 -size 2231698 +oid sha256:5835bff93519709fafd58c0268b170e2c25174f24e9ef0831008b346eeaf90ee +size 1318874 diff --git a/images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png index cb446411c966fead193f32f8ecd46e69dc2dcdb6..6984fb1ca4f35ad992f29cf45b0b8fbee28a4037 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9721a1710d069f507456fca117ab6e7d359d89faee196128bbbed2d73f5298d8 -size 2251492 +oid sha256:f949d70cf878c8154b335a5979edd6ac840ad224c12b3140e587d5be30cf04a0 +size 549114 diff --git a/images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png index 1ae513052f491ab96888aa05777c0a30c64e8b90..d80be77429ccfe3393d44f3fabd69b968c5d8e8e 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a249386e356b188cb248e35cbdf0cbc32d2447896f2655c4b2b9a8f2066f4e98 -size 1502074 +oid sha256:c9b9c0d9a13c95defc284df1253b7703b7cc5171399fba8d83a466eed1c5291b +size 2246729 diff --git a/images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png index 8fa75226cd88e9a6b4410c0440534d857ac932c1..f6847981e65371457ee7b2c36a9fb577fa6ae19d 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78778539b1eeb960662287d3a899ff172372c56947e1a1faeec841a8426fd895 -size 944829 +oid sha256:75aa3f7dde64b1e8a5cb98811f855078ec1430a566c42c1ad1a4dc0ecd95e0e6 +size 1726080 diff --git 
a/images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png index d3dad11a5a487d27293cc2c44db987c8818b009c..5b2139ce5f0d74b7114da12314e3e4146e87ee38 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:721b3087808e447712c65dc76617533bd7dffd07b0c7db855a6cd86ad16a9c96 -size 3178078 +oid sha256:14c933adde2527879316068fa9b27975bd2d418a2939cdfb516313d735b2b249 +size 956738 diff --git a/images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png index 99a32e576ea1f284ffc500362d97bfbcf03da39b..c7d07a8b78376bba722c3e1f445663962b457543 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df4c184bb110b56b5c7ef81172f3c097f82d391cae218f05453fe024d67e2aab -size 3155291 +oid sha256:86c6e07adddbfc8c8bc4231d6dc9af2e4c161e236fd367e65fe22b547e06126c +size 1965652 diff --git a/images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png b/images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png index c9fd799177155df5902e5f2531040250a396cf63..01d8560b1cf27ecc1c864c3434d0cbd237de15d1 100644 --- a/images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png +++ b/images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0c30e7f61530bb4393b0bdff11e0a3c150c1fa3d8295d3b978b9ab4e4c2c9ea -size 1043878 +oid sha256:8d7e86a52b516f76b0ab3e7176acc04f16d93e4c26e78f5210d6c48cb9463046 +size 1119050 diff --git a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png index e54f8b4c7713871880e71ded143ff0bd22672eb7..385ae9cca7e72d75a1f0bf1a94a0bd926533704d 100644 --- a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png +++ b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48607006f0d549662e7841a52f98860f08518c58869b3d7224c5f36fb2436e20 -size 1523607 +oid sha256:6590e580802c0dff7eef809e9c4991ade7fd5e03e5ff5ec456cbdb05926d9b05 +size 1909851 diff --git a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png index d593aab299df48be01350c7119b12a5b220ede42..594cb7a482dd7fccb0f5415160f3ad734aad59fb 100644 --- a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png +++ b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50287470b0bace7b1525b176b1b9e795d1cdac21b6fb3d4b51e742ca77ebc7e -size 2543571 +oid sha256:2fd826d2f269ad8fad423ec90b5892e49faf8d4cefb90be7c5d1b19fc0a43226 +size 1943596 diff --git 
a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png index 2e69ee7ef99c3dbe5534a3bfc640a857dc6a445f..6a5908bf1fb1f3a1e1180e6fcb85c8d531128029 100644 --- a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png +++ b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b21713f025ca1d51cc6c155335836f858a8d072b8945c0bcff3d1105643963ba -size 1154815 +oid sha256:897a019f2f0ebfce7d02e4c8208824b40e3778fd705631076b98d97a5334ba98 +size 1717183 diff --git a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png index 739acd867cc1380966d9f1aa4a648ff58f8eda30..1ada70f85e9c1b4e972d5c93c171c6cb98b60f3d 100644 --- a/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png +++ b/images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:280fca4344c9b0b0643b3baad21726c588c55693d16f424baedbc88d617eca9b -size 4741085 +oid sha256:294e5a1e6334987dd9d6733e8e029514b98473595fdaf38a9f5fc87561a83e21 +size 2352001 diff --git a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png index 04f355000c0b6d2bfe6f07580dc1be13c914238e..abd029a3315934de15dbb70828a41d655b345780 100644 --- a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png +++ b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2c37825b8714d4f24868e3266972e705a61cdc056b53e417edc4969d95e1bab -size 351867 +oid sha256:8141d266c1661d266176c1349436fc79aabd8db6f114ddc5e28c8428d897daf7 +size 327667 diff --git a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png index 0983ce3f9a973da94fc66b97be08e362ed39cd2a..2660149efe3de8b7934e535deb9c6265a54eed90 100644 --- a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png +++ b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:108dea0cf84695cdc3988049c9cae08d3b2cc070d836625f8adb15792d7a39a8 -size 355194 +oid sha256:9e137c730693b8690d62fd2f2f6f203098a2fb0c69585ad4083376ee8a694fd1 +size 355637 diff --git a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png index 05d86f481a9b2ea1ccde8c1d20700b0891504ed3..e3705c536443a86ba44ef014427613ac93f9f546 100644 --- a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png +++ b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:991f745a8a831210c4e55470e4bf0546c56a92914969fae29f013787dc649bc0 -size 382223 +oid sha256:45d6d6b633858a7c5104a64fb27c320fc29cf488c5737a1ea32f4d2954f2827b +size 379788 diff --git 
a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png index 90e9fa2482bbd12084d262045645482a486f9dd4..84c6d6996d6b0dd0d78e3e752e1ce4f36e5eec4f 100644 --- a/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png +++ b/images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:012e100b8fa2020d21efc18f648c1209bfe0c1e79c46084a40241996b9126a11 -size 568188 +oid sha256:0b0e68431685d44ce6a0664294d845f6bf5e01b438ace4c3b79e72551473faa2 +size 352492 diff --git a/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png b/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png index 280d77c3e89a27bdc6de453fe03065e4d398902b..5695f77e86d31643a1976aa54492ca3499659310 100644 --- a/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png +++ b/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afcd474f384e0b4f713c07580c9fd3d69a2c16756f3252689d6f629a33f70a1f -size 1379289 +oid sha256:a816c6d5244292b806f7a168e785ef2c766e07b2b43b2a1fa0133d8d1d6fb2b9 +size 1424369 diff --git a/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png b/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png index b5ca249bdf9612e602f9350f6d26e5f9436c6894..9351031630f73eef2a9f5242d2e6af8c1d7a8cea 100644 --- a/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png +++ b/images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba6d65dc5aaa29389b70e48c25d322aa659fd3adae33c66be7842c364a6a7c00 -size 1631049 +oid sha256:b64c3d53a949250769f543d39c5e851c092d6388fb27bc5c9e1ba77d39624583 +size 1718302 diff --git a/images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png b/images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png index c6d6e4cc963e289946d736deda0ff0da2d0115a1..a8daecb227eb425a1efa066e327db4d9b5698ab4 100644 --- a/images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png +++ b/images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec99c1d3fb1e117c12fd5cf36d1439d65c4d5b0d53de4747b50510a81aafe31a -size 1352686 +oid sha256:aa49b4c3a455620a606e4bf36ffb3f2de6756c3fc4009bfd2c54cf21a121a4a3 +size 1085769 diff --git a/images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png b/images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png index 38a3fb99e16356a1ea3efb4cd5f75e2f14d7f2dd..7777987a1b8c038c01c830ceaf73d5b01567337c 100644 --- a/images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png +++ b/images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cdc4dc6ffa2a72f210390593942bc5d7d671df9ef77fb0afe2798737332f0724 -size 2798933 +oid sha256:ffbe6d1932778b7252c384ef09dc9ac30530fc6bba62e1b911cde03e85d72312 +size 3058558 diff --git 
a/images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png b/images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png index 41ce2761d1392159dbe984e5abf0cc1635740b76..605c0975183c7f3e2d795a8179d231bd64affaee 100644 --- a/images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png +++ b/images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ef82c56f1487909a9c169abf6df5e950e03e7b490a612f703cdebc224337d23 -size 2601352 +oid sha256:4dc09dbf7bfbb5b2d1f3483764eb825e52590c03e5cb6fc1c4e77dab980e8bea +size 1718649 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png index 248dfb4914e8dece73ac851c61e6871887f09a1a..5ea767c954a1ecdd19ed7666317cb7b6847b3a82 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b72827eb8078e4d2ed428e776db5408258b04b6e5a1e07c928ff3862c0f2fd02 -size 723118 +oid sha256:6c5c702cee8699e1eac531ec9d41242c510a10d3523f7deb0a00252b00c2dc56 +size 1127844 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png index b017bb6137ef59205baa0dbc8fac42ac627c2d2d..c5238f01999270a665f99e7f893c87b1826f9ec2 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0cf78d15cbb99c076b1d1ce0a934b0175cac794b6ffb9e1e938a353fe1da8fc -size 946158 +oid sha256:cf492f60a74ec3f1bb5aa2716bce2f9ca8391f1d724a4f5769319a965289e56b +size 1024169 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png index bb43f3d1e4e0b4196719bc4cb27904fd687ac97e..83f7c9c7d332ae4b857505d29ad82658cab471d4 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:feb6ea5594258920ad7be2865123c3ff93b769e6cd2f0284983c242005d110c5 -size 1130662 +oid sha256:31bd6028d0bb879e2fad8da1528c584d001a280130a30c5bae6688c79a2d3785 +size 1201274 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png index 136ab4e85d9f613eb3665b3470901547777c6667..61426d19ccd36c7f3967026439993a38b1d8e0e9 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7132347824b38b582e00ab0857bc8eaa9f98cc5189680e4e8f59617a445ed3e -size 908412 +oid sha256:30a28f034f44e4f65f621d87b87bd099ac7fc84420f7507b042d6207ac5823b2 +size 1130618 diff --git 
a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png index e26ee72269d9ac27e81276bfffe880de6037446a..f87cb1afd37245b0d286e7c4602ac8f368b6f70c 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a9f50e1fa14eb0ba6ee2bd6416e80cfce0e1c3f901e2c6e69b3faa623e381e9 -size 821089 +oid sha256:2700745af80a6a50951188275b555ec3ef50272a2048bd59751a9b2cdd13b079 +size 715158 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png index ea4af6f0f0ad54adfbc9e26eab27b1abdfae187c..291086aef9e9b6b64b84bdf5bc317d530a556abb 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20ee90061c87193ce1f982b00d377b978506add4b06496e4e0636d892295a106 -size 724322 +oid sha256:51633979c68107aa64fcf1315c6c2c40693d5ae5b12f7465ea1044110d7da29b +size 962070 diff --git a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png index 8887c8b735e08c4a8209f5e1dfe650ef331ca200..eb36568df2643ea27ac1361e172acce0e16c4172 100644 --- a/images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png +++ b/images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bd07eae12b8e20a39712b8b7a2ed27d46dff4e354bc7c062c9c3604ebfd2d0b -size 728846 +oid sha256:1b8fd958ee629e0b055f0bb4eaad1488912c4a8b226c6a90cb89aa005ea9d36c +size 1157710 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png index 872156b39037f4536587d47246d3ad9d67d16c31..3a979c1d56e6427a8bb5ad6e0a19bc20c5a451e7 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:793d77504935b363385f9366ebe7a0035afa438563ece744666d13266bbec4ee -size 452435 +oid sha256:29f80b369ed0f7d74bb67be1bc36e08d2a0488a316bc8cb360628c3ca111bd36 +size 726344 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png index 2c1897dac380288151fcc8877034b108892ce95b..98b8b14aaeabf5bae36ed909f22bcd31235430ed 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f34414ce189e15917434a3a55bb832427c566d872fb53393b121d70e888dac30 -size 557431 +oid sha256:119372d801207479a5efadf711f3363ff5e0a4d7dff6c434235ddc3add178366 +size 408534 diff --git 
a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png index f18255272a9db5f1f7ef127baeaaf4558580e0db..22991de2b2c20d8b4e7e71a5cbd905f44b9b3e25 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7eec007ccc2bbdacd23cdbcb5677e9cbc53a383a47c5055c909d6bc362046ea -size 1714225 +oid sha256:78e0721e9648a6f8435fdb72ca673cdf88f14f21511006f2c8e4c1d0aaae9a74 +size 1354376 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png index 3fda0d8d806262e59a21dd49139ddf4f429d3471..2ce7a55049978f9a9522382081798ca13d92d4ac 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88946b63531a39f215c06fcb4344a4db2a909fa3a1a59d75b24b579fcf8ade5f -size 991774 +oid sha256:7caf47a078138ab3551f701f9487280a907662bf4807a2785ab566b97d2bdef4 +size 830014 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png index b4756d21b8adb6f6c6c2fa3de623ffb56663e38a..be0a7ea4d0577ccc4369d70a8aa042d3159d266c 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4a2edfb433a139f65e159b5f9a46bcf900720400c3c0228daa41f97820fc783 -size 490516 +oid sha256:766cbcf1cabe28399053a6cfc46eed321a6cccbf31470ccf56fccd9621716e10 +size 665344 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png index 8c632c58aea8b4a87d38f9aacb3d860ebb538366..d9ec1001388aa897ad1f4a14dd03abf2e3e0b7c4 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b9357c7328602914341bc249063f541e2c519afc814cd29900478d5063ef57b -size 427262 +oid sha256:ef21e547a2016ac6dc3a088a253a79ba49e6ceda02c7e1998a84b47df956d0d2 +size 810228 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png index 41f56cd166f25e1274a810a0d3d3da65776e9bbb..5b2a3747bb027e5f5a401c5d96e53082f92b5a30 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18ab4e8e5fc8816daad1bd21f05574bd45b5795ca8686396e1d6f48db8057338 -size 573122 +oid sha256:d3431a36bed03c42152ae9159c4f0d7748566974a1d8c7f0be789a7cd60e283c +size 855207 diff --git 
a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png index f2d5ccbb816175331372929f141137c4c1c284ca..e0822c63b6dd6490b441f69286c2b2e99fe40ecb 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bef8422360a0df12f7b53d75dd0b504b8c77d2cee1610309802e3eeab557602 -size 216027 +oid sha256:273764d7974b285ca85c1fd3aabccad7d4a4938bf0b3f0f1efbf1e6554fa8ab7 +size 202989 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png index 5dca79a289ca63242247c814ecc6e71c5f59861d..6bcee9bdf499ea917436fc0eb05e6e27bfc076fb 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfbbfb08232d1276b1dae9679bf4317c9e01eea9349fc22f57111b97f915780d -size 998921 +oid sha256:da83ee7356d68269ea7e8a3847661b318c91116aae322760a90ba01043ac0918 +size 383077 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png index df539efb69b7ce9050872324680ec6633b163157..79bd3e93f1801440357c9ee408c6885efa3fe9e6 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9678297666e55cd8f4b0a21189063fb37f96a3682b2514b8c615996d8d4290e -size 202597 +oid sha256:0089bd5249bfd454953f8d2b74b3a9b4dc295a959e2bc7b8eba464dc7d5cd3e0 +size 217075 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png index 2ae28c165aa8825a40a65a136bae7db7d16d6c8b..8ac773cb5e27c0f6e594310e58da9d3986962ebc 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04b16350d3ee9942ea8393db82630aa2218e238aa24df544d84a5a3706642978 -size 635968 +oid sha256:343f628bb7f9c2e95d1f71ac371830cdfbcd4254d461fe20036b9257d03fcdf1 +size 857281 diff --git a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png index 1657a03c14ee4f0f372d2a47eb83551ca15c9f16..bfcc903db663c1f5aaf86fca666d95b3c7311a13 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbb1dfb235acada704a9856d19bf28836f99fa39d5cb30d2b8c48e3c0a07feda -size 287304 +oid sha256:5b09be05d874b12bd45418b433d6e775d9ddf1fe8aee5902148915c22c4bd519 +size 266459 diff --git 
a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png index 44372169c826eccfc49af5ea53bfd6c6960f4d6c..c0cadf3cf06f8987ad3308480ef541f4fc08e558 100644 --- a/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png +++ b/images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bfe00c6497fe03f662b28957a85cf6ba520a51c88abd73fbc104f79771930c8 -size 480269 +oid sha256:d92e7008a031feae9c4473f8dad50293f8a4daac6824dcf0d737cbdd50cb10fd +size 729037 diff --git a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png index d0798f789d115bc42aa5c3b9513b8af69a9a32e2..1b9eb1c14d7219673ede46d3ca3511d866ef0f27 100644 --- a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png +++ b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1043d77251b1b52fb16b1ac0e2a46fed858e761fb0f92716f078430707183ff6 -size 571478 +oid sha256:60ed62c0d29c18bca824ef9376103806c9109630253e870cd92a5b84ae0bb1a5 +size 537790 diff --git a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png index b88b8da3debece6afc3eef23b4579c7853644a93..3b2c37d6dce4c99b1e32b89c789d15f8fe4f0725 100644 --- a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png +++ b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:544dc8697ff1eccea3a29e40d302c5f182f220b9b1c852048cacee2cbf7c377a -size 550399 +oid sha256:697f2133e13167c9fe541f23701f06e69859b9c3da909a4cab091bdc8784f2bf +size 1106092 diff --git a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png index 5d46b2e49273ede26cac85f28034cf409ed32616..0f19ce5f98315f1977fc562d721dd0bf646a9510 100644 --- a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png +++ b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19cb768787625658650f527e22ba2f9d7c54fbde27ab80a81e6b2d674e0a5395 -size 792026 +oid sha256:8dac25807b4d43f38b4a535f41be378d0d5cd7adbe5297585059760ec0273e7a +size 1283723 diff --git a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png index fabf19310e52ea4805daced0f000d3c1b32d0b58..c18a250a2f03e0ed0a249adc7df557e1f18c0ab0 100644 --- a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png +++ b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2cf5988a560f87f4f4940646a8c6307226209e6f3a90d4a2f88b319d52a21a5e -size 648060 +oid sha256:712bbfd3dc60eea048d5c2ccb16e07e8a51260771a1b5062262da169f52a5a2e +size 662646 diff --git 
a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png index dbe227c58601f1acd0e47cab66dd2a093efc21dc..aac1636c88abfa1c14f48a5a716123e40e477a2a 100644 --- a/images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png +++ b/images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5841e290bbe13dd902cdf8c22190aef83ea6a4f2161fb0d3dd87074ab76b308 -size 741310 +oid sha256:93088b8e027f67aa094581ebb5fa18f0317a0afbe8ecc8d76c0234da94c461ca +size 442455 diff --git a/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png b/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png index 703fb219f26c6b8ba0047e6166f115bc8e1bf7a6..fb84f31a9b84e00fb59d086872d0774f77d0fd50 100644 --- a/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png +++ b/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d25df09bb7f25eab2aeb658c0261115bfa8c47ec63d1a2780e67b802181a994 -size 1695531 +oid sha256:b32ba953fdfc97e3664e9b8f97ebd995b8729335d95ff6c0b8c378adea5297bd +size 1255134 diff --git a/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png b/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png index 00f3e5a97d1a4ab618bf0ff9af4670686f60c818..d533952b28a6b0c3e616b64a729b8676bdc06a8b 100644 --- a/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png +++ b/images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0597bef405717667ff6250a550992fe667261b2238fccf3af97f883b9e2623a8 -size 579754 +oid sha256:9aa8d99f579c6949ed4321a1182763028ae135586ecc6e89ac5ea683c2b38612 +size 567363 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png index 6da1d0d80f99ff495ef2ce36da8df987caede5ab..fcc4e4bcb024366d816de2ae9d502ffa6502dc58 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0ce0c94ae562508898e0d05c4f9f245193221b3e18b77d30fb7a348b1108e33 -size 887126 +oid sha256:175694b23f21aaf1c39b05df9f7f61757b74016f7fc8296a53d194c9ebc8fec5 +size 1096863 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png index 27d5cbcd05fda6eaf3fbb2aec1cf601593b29798..fa445a5139d1f91554e95f0ded5f7e423ecbca82 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67538f136daa3e968bfd6e61fa7004abd2df5325a845f4a1c46e6e93248d2967 -size 850413 +oid sha256:cabe52a71446b223b39eb23528a67ddd9fb4c25c4483e5bf1449f1ab6517c86c +size 543096 diff --git 
a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png index af575f423ecfe29aa1652ff9c570d9edc954c725..127c069f2b0740afcb1b574bc508179f33c0b03d 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a73543d648dae9da4e55dc52b968133ec10ed8aa7223030bc32a5f598c00d38d -size 811726 +oid sha256:65fb3a657d160cd83cdd27b2ab908825f120e60a7d2d42c4c722679adb47a872 +size 730169 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png index f7b8c37965179215e3ae2e49a793ba143107af9c..e7dfb74a0a0f75d2fdf75d21ef71803cde3ed0d8 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5542e1218e31d42b2ec34f26a1ecf51fe33a5456a411591a550ca57d9ce38c87 -size 1098877 +oid sha256:344c05b6fc31f7b3c4566ede657c615531a414eb772eb681f7ea19d8d49a98bd +size 1250025 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png index 1ba449eb9f721505dcbf67aba844a372aa3246e3..89008a1724dc0380bbfca5388d09f7131f5c419e 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:656e29f4bb0d1c54b462a36b67951cd74cc6fc87b857f90d2e3c82a279257307 -size 820450 +oid sha256:484620928badcc42c8e0b7e0f352432926cf2e821b922482c25c2ac1b0e870bb +size 520936 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png index aea20707c1b23be897115d14c2cd67ac95d6a99e..e24c7c626d2afec584b4cb19d7efdca91d7b87c9 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a4a5dc8a67ffc063ce144b71f87141be72070a86e772b8040c71a8c6f1595c8 -size 1242697 +oid sha256:d95c2f25a496806ab58cf972d5b7ff578d89f9ea5e2a955064bdd0a9b73a324d +size 1798348 diff --git a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png index 68e8b79efdd4ba9bd1a8c95a40b928996417f50b..322182c897530c0d20ba2756aa5cdc5f7f97a402 100644 --- a/images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png +++ b/images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6667165ab5a7ad37a9540491914b84191bf447bc6ce1e4fd2f9724fbcb69f54f -size 804806 +oid sha256:a37d982ff37f4481ae0fbca5e42654892192dad650630e1610c7dc3fcef92400 +size 1311060 diff --git 
a/images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png index 506d15145d0b436809cab8ee1b77792d10aa1bf1..39fe417d60ca0e903b434f1895fad634530b0b7d 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4e4dfca8746d9c0d5b9e5a0260441852d7d6fa9713e20e98b01f6522055bb6a -size 411430 +oid sha256:9d04cd5534b23ec9a4248111338a7e455547f1ff59debdb28d6c55ad4ed56a55 +size 358728 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png index 95944eac053437031d13a25d7e79d94ad661209a..f268e9537df68e6681abc184be0d5cf317ee5f88 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e571702f49cdd883bc07711912429305d02b5829f661d2f0cc337ba73e91510 -size 1451364 +oid sha256:26151d68ca346cfb6a9d943ca49acff3bfbf6b1112f888929e8b1f2daa50ee27 +size 838481 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png index a44ee1646a60672ab518f09949fe9d4106812f18..4dd0d2a36e2a38bf13841a7508b8d3c6cd835fab 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71d8c0c89c0585413899bfe24f66b7ab25cec942cac351b3804634e682ebae30 -size 773701 +oid sha256:fb0eed099b71648ff09d0c452cb682f08073beec036e1679424bf9a44f4ef209 +size 831774 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png index 0c8aaa217788e85e78f4db23c42794ce0bb99a0d..69c7bc791652daa226a92b5ffec2adc99b620eb3 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a82cab1dc74773aada6019ce245f3f923a7cec90e5a9bfea8812eca295a0d594 -size 773832 +oid sha256:3bdc3cd24a751cd561b9613b5e663620d474058ccaec94fabd9e00513872ab23 +size 820370 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png index 2671f79b77fed55226e42b356fa1aa52c76e35e7..5f2fea1844e369008054a00e393525125628872f 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc406bdacb8d80ea3fce384baffb1882501f5cfab237b05e7a9c2350f6cda31b -size 1439536 +oid sha256:dfc597cbf54129906786b1904758d2c657f987caecf93a46a56650ec2e2adfd3 +size 692461 diff --git 
a/images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png index 44e586bf5132049a602b337a7a294fc3b9da9906..87302fd102597a15165959212288c0496fdfed5b 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9967531057ca0a458b5a726e2bde2407f6dfba5a8b93013a7ec53bf981cf50c -size 376322 +oid sha256:d66aeff59dda8352de5e614cab3bbcfa81a3ff8c78d67050f9625137bbd61008 +size 272774 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png index 253aaa2c9de4e4034032ea9ac2a1beaf14a05be2..80fbf650564089e6d0a0be572f8350fd2789c61b 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcfa25979a5ce9fa2b692cf5890830b661d584dd390df00b7591c2049b9cd3d5 -size 625269 +oid sha256:b9c87758db59bae79387f568e1f8d2d47088b7afcdef0988e7735066c69a4ad3 +size 562515 diff --git a/images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png b/images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png index 17ca1cab2f1130bc87e193fb67e163aaaade6eb1..5128f0ec66292c4b0661867f32018b9f29f3f6bf 100644 --- a/images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png +++ b/images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1080308c3e302dd1dae0f3a86521671b817687836d9479aea1ec31d1905ff833 -size 771171 +oid sha256:cdae5e5d5f0d269ca8aae28e0429205902c7297b84901938399fcd5eacb45d89 +size 807123 diff --git a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png index c53354a18871e6d9ee0dace24604657d2cd255e7..432443c60f14957079a979be413bd4cdb76fdac4 100644 --- a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png +++ b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c2a9f3d80963154730a494b3bb18f1e36bfd928da2a9c541382563ecfe9ea93 -size 639313 +oid sha256:936615a0417a9f4ab48f82c2dd15e69d810b04443704132e6843bf3041e0841b +size 593377 diff --git a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png index 84f4e537b3533c9747be501c8cb670265fde4041..a3c48feff538bef588e9e3bf0462c675b422ac2a 100644 --- a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png +++ b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83eb274a235892f92a4736dda8b77068a14a37d9b21c9b0cd790a0070820fed2 -size 1369162 +oid sha256:4c9bc7e18baa65bd6da16c35a103f020a5571d2bbe569152eae831209731be9c +size 2016593 diff --git 
a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png index 0162231f95d7f0dfa867675ab1359955d72c0454..2a59dce8c924533ae983a9109400b2bbcdc290be 100644 --- a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png +++ b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb489cfe154bd6f077675684f56c77647c65fe14950d4f25ca4ec0cd75e9d47f -size 1181885 +oid sha256:7621085e658e0d373a91187784bd2923e1f02d3fa1239346f4ed60c196eb78ef +size 2074096 diff --git a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png index ba3d400190a0b174fe5ce3df902def753ab444a2..30804c382199f5904ff90ff564ed1ab96e872799 100644 --- a/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png +++ b/images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e282644714e4de73c0ef8d2bf0c0129b048353fc6846e39056171dd5acf1dcac -size 1243383 +oid sha256:637dbb8500f88dd649d5863a524a162e64b528a14edbd1be5b37cf8509febb76 +size 1739559 diff --git a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png index c99ec38fd99bd61c7d9b548961de0f4e079a6fe7..92cdd83ead4df39b2857c19e1e4be2c8fa1f402a 100644 --- a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png +++ b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2aaed45878d6af2b2002d71b8ade9861873ed2f489960732520641b361bfb82 -size 651670 +oid sha256:74fcec8078731ed3cf44e4070d5dc3f3d388e27debb674a33a45be6ae6291612 +size 798239 diff --git a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png index 2e3c4333d9c393b7548ac2251da5e94081b7a9f6..7e15de7961f3ce1801138caeae77f0e8fdecf9e6 100644 --- a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png +++ b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b48d431a41b55d72d88375aa2ecdf989a16dcdfe150748cf84f74ff7b08b72ea -size 1417853 +oid sha256:331de1413c6e871cf304fc5cf91ca85d85abad30a35d2ad1e87b9c141541dbe0 +size 746011 diff --git a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png index 2e18c13cbb240c7384fff6070c6294d9a004a2fc..c144ff7698aa220fcf9a709ae32cc96ddc9c20d2 100644 --- a/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png +++ b/images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:777b1f0cbe5b0b1d753891a5e2fadba61277d14016e24b9318677f1559c54d74 -size 869379 +oid sha256:873d82018fb73a1f5a01406cb584ded2f9513770d5b5059633de470cb4a9669e +size 869742 diff --git 
a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png index ae3be65cb3483ca6b8b3545055258abfe7b250ed..1f8dc3b42acff1b43db02eac2008c279e096c8ba 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97d7ea635573ffac6a3c7521d3a2e9e007901914464d4ae4639470210b560a94 -size 1284281 +oid sha256:e8da672d50dc9a158391fee2fe89e2a262784456eabce53f96b83df7a516288c +size 1203126 diff --git a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png index f4dd17b6d821448f813fc2eac584d6495e45d115..e92eec0a42de9309a3637376d3da86fcba3c527f 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55b2769cb44095d104cfcab88f2ecc8d25b82cae920e4f91a30da426e619fc8c -size 1641430 +oid sha256:7dfe6269716ca34c19e565474d233478d42a73716dd178f5f1a5056ea2962de4 +size 1188670 diff --git a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png index b69721b9f9f0cdc7fc0be1224682221f10b54bff..9a9ccd11ed5363654ddf878d35982a4c52b0be1a 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c56a6c4660e9d756386c44b1f6f46e8e79200896e197df45440b1bd34ee31de -size 1594814 +oid sha256:aae2d522ce9019af217b9e520ae98550a1b45d481f21be4fa15a95b59d2a1061 +size 2010055 diff --git a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png index 69f0ff1dce72580d08f230ce4c55958820828c46..68eefd4f777b6f89c1ed95a1386ee1fcd1ff7505 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbc0a0fb75851c130cb52cba871abe078acda385895084e0925dc1fe3465001c -size 1594955 +oid sha256:5119d0f4e90e7e9f6b8101c4ec8ef14553d31fac65d9731c69a121e96c4ca60e +size 979269 diff --git a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png index 67d03c6d774e0c03522a03fdbca0c5a40a6713b1..4f548c9a0446f7ffa7d373a43168ad51e5f270ee 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f30aa3a09d1409b6eb7349102830f5b2f020169e9b3da79257d6713532597070 -size 1594111 +oid sha256:b3a957c22f2f02eb596feb0e60bf3d7d8e72c0cb4fbb2848375ab617c5584946 +size 1312927 diff --git 
a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png index 0e233396a3c8a1bd05470c6800d7b8d2db14d58a..015ab989e5ed51c67813560685fec59ab24c7b6d 100644 --- a/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png +++ b/images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d5d38c7d4cb5018e5c01373d80d796e3ff7ab06e76b7cd93622368093fcd98b -size 1655064 +oid sha256:829081fe929db3ca48cf131963c04b2f28258ad7ab219124f77900984f081455 +size 1389324 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png index a46ee27d2fb0f4f39beef69c81bc5fbcb8b14847..a22b21695dfc63c834e65416a79622ae8688b706 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69672e841d1c9a0e9642aa69ffc520f7b8e32c511209a43689debfaab618ebd2 -size 229949 +oid sha256:012297f193031919cba7fd5c92ff9b077b6652ed0ef7efa905aecd858024b6e9 +size 470364 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png index e93f7f627e3f69f07d9d0c405b7c83419a602e4f..f04d334e82155070154756da8c41eb1890fa273c 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bce9bfff01e1154e85e28242dab49c450e8c6885501f4e3c8a0358619c3b58c -size 737099 +oid sha256:9d120becd42d422260401dfc243714a7258591292bb63feb767c2fd65f20e326 +size 1066322 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png index 3ff5208b0ff0d4ce823098c34b864b7d22faa064..142e42171e31dde454fde4f589eacf2ff3e0b2b8 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e93a7b3bff67866fef414205d8e184308e45453c76f727f8773192528514208 -size 224218 +oid sha256:9bab521727ed389b6994ae470620f7dc4d130dce2d22b2b2e1f52bd7ccac244a +size 425391 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png index 4ac7042b81f97cbcdd0eb11d401eac7d5149f980..c1ee69d61d4c1ab34d96fcc95c73400710158f0b 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c77b0faff172ed2a9233c47e2b6ecd9ab980b4c3e07b1dfa7d62bf4c86e2210d -size 264078 +oid sha256:914fb344d2067f0274104a1b2fe2a281ceac6f332bd44b408335b26361e7c3b1 +size 671082 diff --git 
a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png index 40fe9ea00f86c195270a8caee3b605b978afe40b..d2dff1d17188fd66b363effb6ddba98acc669d95 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fa897409ff657b9a81e5783a1879daa74025f20e552da3a5572e6207a174b2a -size 989419 +oid sha256:5a9e684e135f8626efcdb672ae25887513731ed539ec45b8b31f7520468284d3 +size 1051852 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png index e02553be2f77fa4a94dc5948c93183f58f3e27cb..688567e51ae6bd4d5bcde384f61964abffc1f418 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19fef9e4316d42768de1e090c8765ce69636169465a534f5962f627579c2549b -size 275605 +oid sha256:e5c44b468b02cdd2672dd221cdb78c1a61cdef081f1c7fb3a97cf6c4e17eeaac +size 267271 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png index 144dd9ea785d1e648363b4f2834bd4cc515258dd..b1f5f67f0b3c93af7250ba2402cbe54cd394f098 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d15e32b6ad5c033e51399b4a8d4ec53b3d7adb83165f3c74ecfa99cb590f1e4f -size 894184 +oid sha256:e83acc1bcace50c4b26946cb88224242b91b199cd068bf3df01908a5d8a62aa9 +size 1117235 diff --git a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png index 2c292aa6fb4a85b0848c2d8978b96c80778e47ff..806195c38452157e29ab09a793c4dda13e85b468 100644 --- a/images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png +++ b/images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c47b0c42be40cf70363778daa580b00fdc01ab278f87baadc62625034aefa60b -size 2401384 +oid sha256:f31214cf5ae60d13ebef3c3fbab41ca34d707e9ea569a8d648afc376c640ce81 +size 1954097 diff --git a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png index 5e389cbb83924e3d14643cc974dea57fc5c545fd..e1102240ec0e30ff5930981b15ec6d9f86abf6d5 100644 --- a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png +++ b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae7dff37432b6fbabc4a1f43ddf3dce13c7708700c047bdb2ad9ef6237fd2af5 -size 1919518 +oid sha256:9206268293ff3f326df3629586658b22e6c237b43a7888447f09dc58c456fb54 +size 1486398 diff --git 
a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png index 86512c008d6b5c6e49a2e2e0f48358158935ccbc..2e632a7c3b2b4fe19993a36d53aef511052f9d97 100644 --- a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png +++ b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:123e9a2bb6eb2056a90b02bd7257d0b714b24c4f2ee06d95771b4612f114859d -size 451334 +oid sha256:25c79230317375a2f51db86f7ce22c572104902af155a2c253acdf517d96e98c +size 493335 diff --git a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png index 379962e7f24a115b5a4cefc003729ae592c5b62e..010750f7ee3591ebe92c549672c3fa0cdc93ece7 100644 --- a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png +++ b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6a73de9f3d41d69eb19ebe8f3ad909ba73c500c09ee1d367fce06d7ef7b117f -size 737778 +oid sha256:ee7eb07982e9d15bbd29750ef685feadb421eb7dcb5ec02e4b8fbd5264add63a +size 604230 diff --git a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png index 3c19ad9f77ae560fef53d989edf3f804a9538d1e..2c32c84230a8d80ffdea5f74e72f130e20d69a75 100644 --- a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png +++ b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51f52f72bc4d801c3c256f74c9be6abca79dfc7c0353be8fdf24a2294539baa4 -size 1267910 +oid sha256:9e62865f33a2f81a54a69a65aac6cb317dfffc32a6a393a6d196dbb062451f5b +size 609593 diff --git a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png index a0b4bc77225beec6f1b8391c0c135a389a868498..56b76ab86c2ffc9f2a0d22fb17e55e4a61329fb0 100644 --- a/images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png +++ b/images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:468d6f307f0175034ac221b55f64486e9b16fe42a4a16a3b53f61c8137e26294 -size 1725442 +oid sha256:f9fd9c206a5da71211ec7ee9cfe70d27e8e9757cdc681987a9d3cceb626bed08 +size 664116 diff --git a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png index 7738cdc22f77c32a20399ec46e8c6567f240f29a..2aaf0249ed1b13f31cbcde9c6260517a375a232f 100644 --- a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png +++ b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4792d3d91a10c3d7188a26fd279fb32d7231c1f1949f252dbe415153e14a7b03 -size 1391687 +oid sha256:0d11a74881df043502a1ea318ca0e693cdb61929dfee9a05d9192d85ff32b3b3 +size 1262714 diff --git 
a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png index b8eeaac78e29af5176e0c6793d5e15bc69c2f6b9..348c0204743f7a39b1201641e4ccd455d12e0058 100644 --- a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png +++ b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:743192953bc2403b99d3f8c45c120b0367f1f551d6f59042308e386a3e09a9be -size 1287874 +oid sha256:2a93e04bf5356920d41f44304bba3aa560cf0f70ddb83b9f527c073ad9d889b9 +size 1477307 diff --git a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png index 1d4b73e2babf66a15aa6f59b9ac5a24cba6a8e6f..0272d2f7ec096c25ff912a536cf96dc626d82aab 100644 --- a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png +++ b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eabbfaed3377c53c39e2f6940b1ee171c51ef23d9835974004334f02f9ded134 -size 1396810 +oid sha256:556bbd81b5bb94ea4e77acea9db5b4a83879f883529a944de1bf19dcf84a32a5 +size 1511425 diff --git a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png index 7738cdc22f77c32a20399ec46e8c6567f240f29a..0843e42e4af65bfee8e1624f14d8fcc14bc29f09 100644 --- a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png +++ b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4792d3d91a10c3d7188a26fd279fb32d7231c1f1949f252dbe415153e14a7b03 -size 1391687 +oid sha256:c45243553effcf2c51fb661555f25efebe763fe29a54541b7bdc852c23e133d6 +size 1734725 diff --git a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png index b4c4b96f451a94bdd716caa698a4c8771c213c02..bdcd2f3adfdfd83c44ac0bb8344c7f55d5eaa812 100644 --- a/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png +++ b/images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da64acaa0ac84e8811dbb5cd7e5b99ae2eb388b9d125d01c935e600e6987fd50 -size 1204310 +oid sha256:dfa95ede02cedf1f6f6c4068c1691c5fc82154aea6bdc6696dd904b2301d35e9 +size 1489035 diff --git a/images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png b/images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png index 018ed32145111e16655861113cec5af86a50387a..44b4b044ce10ec0ff4c6d5536772db7ce912be9d 100644 --- a/images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png +++ b/images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db504b82aa499877f7e9cd3c91f098d8523536e94df0b1adeefeb6eb07a3ecee -size 1208003 +oid sha256:85f50b8ad237e110d62e75037921c28d7b496395b0c65e4cbc97b9915e3cd96f +size 1838768 diff --git 
a/images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png b/images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png index 493a342a106d71892a9df202a0ed9e5c768d1823..2f839c525336a564095a1602109e48d181e1fae1 100644 --- a/images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png +++ b/images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfadc97590d55e034282fc902bb326af8b104913cc88a4af6732aebc5ba3f4a2 -size 1010436 +oid sha256:1270933f106ee28c68a6d338a73e2bc387263d063bc85ffe5515406c9fcb0c43 +size 1480722 diff --git a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png index d187c535ab344d7e583d8f854ae5e33d5ae94c07..f2193dcf2352e5889447ef66247a4df9bd4f9a2f 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a013784c4a1aa2af6bb161961393910364d5ac16772a642f5a89dc9a473e7bed -size 1214100 +oid sha256:6ad4691d6e2f75a9bbe093cea1d73fc0f6e9c6103ffcaa870ee1849c6948345e +size 1332959 diff --git a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png index ab3ab27494ac8e0faba4d244fdcb77d5114cd045..050295ac6499a736cb83f9c3d44f5fc0e9a007db 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:218ed67359b396bb441cfc29f70b57a662f53279ae6edf1a3d1978519e2d8af8 -size 1394717 +oid sha256:ea87e5e6a5c63cbaae443cf02c4fc54f5b5fadab6d26f52f0995a998649f3fc8 +size 970582 diff --git a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png index 9163e71bdae1bdd5a82162db06d5ee77c6dfb87f..99d3f1a28a1fbd510b7a88ad50dbecedb1c89968 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a03d32692c62c84c81df837633d2961a318fab9ccdafb6e5080018e9b5a9449 -size 1377383 +oid sha256:79f03a20845fa3578bab994b31301adf4706402a0b7e790063ee4f2cbd04f5d9 +size 1140730 diff --git a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png index ca5a5a6b864f121ca9fb290362a84947c98082de..45d1c05d9250cde6fcef82136090ebc107f79d08 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7dfcab9b6ac499144f624b8e45bfa4523d5ad4185dd09ecef574c44b603bd283 -size 1383842 +oid sha256:bd5c9e1949b7b3ab99c8ed4b0fad2d8b46b6e14c7026b2dbb4b52bfef929b825 +size 1185783 diff --git 
a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png index 9ebc4bc72f97fe5e07a93376072c721d213a7fa2..d2018d2356c44bdc3b99bbc397d5d480731b7307 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1815c7796e2114514a10ea43d4168af104cec70564245ad088a06ef06a577669 -size 1119800 +oid sha256:73aaa6e391acb389260dec16ba14bac005ee59a3d59a6b4771ae6d8ff2a8af15 +size 1210835 diff --git a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png index 225e6f591cb6dfa44cc45ca18f88f708ff6b1520..0b06d0e868ecc894a8ade0cb463957846c7e3009 100644 --- a/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png +++ b/images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3ae286a3417933a6b801904fe2d536523aa4c4c7c3def03da06d5ec988e900c -size 1440368 +oid sha256:ad0cf50218183f8cf336db7b5af443dc12baafe32278dd7d97bf6bf677ef616a +size 1603060 diff --git a/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png b/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png index fff3cee7c92ae6368b737f3f91cf15a3a76cc2d1..6d6009e26b1394eef611310b28ce418c8592bfe2 100644 --- a/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png +++ b/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3f7f0c197c8d72b1333118f1f42c80d65c94c05858d2607b51118c718fae978 -size 3608703 +oid sha256:313ce4049b2029f9813fadd33b8c61d0f3e9c84903d99ba85c208f9cbefe4a83 +size 2890789 diff --git a/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png b/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png index ac73a6ad4df2c622f04de49dd5073400f65eab8d..82b726fb2ef18e0eeda50339f57ccabaf651455e 100644 --- a/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png +++ b/images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cc146edebaddbe8aedf798229e4150fc5f41a60e7c59079d55a5d1b0638dc59 -size 3592516 +oid sha256:850f60c3a5a566fc0e85cc4d0f160f10d55f7b5c3fcbc7f3e2c0240cdbc97900 +size 3922207 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png index 9e7c31f4064778453e759f795e68417c97af843c..447659f709d7f15e677ff1a3be1113b946f9d230 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21a79e8a2a3ffbfad5108416729729fdb93804de1375a6922223c238bbb7b14a -size 601553 +oid sha256:723ed8fbfd5250cad21ec1434e46e8d01c93cc1c6b4c6b2dcbe41f16be539eb6 +size 665608 diff --git 
a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png index e839fedbb943b2be7145bf5f142dc27ab3fe427e..960766e5db425f1b9fd9f6af5c11d554908dcb7c 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42555913595c526cbaf031dfc79f5ec6d94028e2272aa14202b1b850d955dfa1 -size 390365 +oid sha256:6aa120ce45b898dd96ede5abc787565aa2b5d6d5d031e1c6151a8d28c750b0ca +size 639820 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png index c2f3b9e38b9823af4e7f83dc47f054ea2274df9d..75b05796acf4dd671907d81a3235a2e4c6a2ac22 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:607ff6d787a43c928d16467475fcf5a931bd48bb895c07034230901903024046 -size 751966 +oid sha256:305d8cdce0733c25dada90c9325564a9b01e2407c38378d8f92673c0352b34be +size 929263 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png index 6c23a6a865381a86880d28e1c535654ae44671d5..90bf567987e53a54789a5363d01503754a6d1845 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39fa5a94309455237d9c318b7ffe563c08b8e2ed43b6d370db916b6d40700171 -size 1975311 +oid sha256:5a3fdc501ab0712b32ff46682e8437654d46d0dcd9926412596d126df04376fd +size 1054582 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png index 6bba38a8f9fc305b944fb1622cfdeb58540f01f1..7ad00b29c47fe71c232389282967f84d15c323a4 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f27656a62bb9b4636872cf195c56c46d3684272d021e656afeb068912e4e238 -size 470838 +oid sha256:f8f9c111abf092047f937e7e2abf8a7045d801352f34e859638f8081a4dacb0b +size 589044 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png index 05b9af27aafc4048a6863e28ea6c16419f2c1e86..fc6696d4ec642277aa1c434c31abca389b397df8 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b800c6bef9497123d6faffd9a0f5b7399c67919e89d39adeeb29cf66d925763b -size 763918 +oid sha256:efca455d47776b78f78997eea0c53d120fa69d13b830898bd6087916268dface +size 984789 diff --git 
a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png index 18eb1133077a11493d2786cc52ea798182f4825a..655e0baab563ff9d3e3950bbc6a6725fbd221ad2 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5b8f7d7eb9c8a38e06211638abad70087561dd27617095744c1005d73ba3960 -size 560087 +oid sha256:2cf83754fd5feb98e5f43bfd1f3c57dfe3234f15de9d5a1c8339cf1133e5bfae +size 498786 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png index de6fc20c2c7839c766aec7bffde345cc2ad1290d..319354a497f58908aed960dfe2bc2a54a1a7aba1 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef637434e81333b54ef8131d33e59e2fe8cc3a5cd1f4359aaa582086d0ec9748 -size 721386 +oid sha256:4a8682c9eb7b1194feab295a5f907df7472673ae79d3aba75cb4b1457b421214 +size 674314 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png index bdac9f628d364c6db588433d531a2c6cde744d63..4214960562a104891a9eb3da3b051f0d995f3080 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf212266f853635d092e7c8845205756afc9cf50063f6375be11a38a2b308aaa -size 761557 +oid sha256:cb61137823b8ff1cf575744f6836557bffcd42aa2f5d530244d6794887b255e1 +size 707031 diff --git a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png index 8398bef8a1163ebd5c9c599fa21945619866a31b..e95e233b15de9d2cefaa3fa4a85dfb296653bc33 100644 --- a/images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png +++ b/images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14c88995e472a12e89fda85426d44f5423bc64fa7280236e1573dd9bf95a7a04 -size 389067 +oid sha256:1082bc4959392fe9faa978dc57c85ad72201383bcf217badb32f6bbfa7e12e50 +size 327701 diff --git a/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png b/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png index cf7507bc4ae962f584cb019f62c0d2414f556af5..61cfa55913e6efb5292d9a2ebf13003bf7069fe9 100644 --- a/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png +++ b/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fce706988d54296ab41e426bab7c5de61ee90e9d54338f50df95524661aea338 -size 720169 +oid sha256:99e8baf348f249fb055677c330040d0071dbc90af583e7b23e7f046069fd9c7d +size 629154 diff --git 
a/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png b/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png index 528706b1b8f7c7748a0690d1677cfe9d1de6c818..d64946d3e0b91c4dac5aa13bf720dd59dc923852 100644 --- a/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png +++ b/images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74d7df9e091a38a1b2324a0abccc0c47ff4db5ccbbaaaa9dbd2db54febeaad74 -size 1783550 +oid sha256:5c10a19c02baeaf52540980164f36356a514fc74ef04acc6eb7cd28f871e4053 +size 1458571 diff --git a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png index 01e2b571ad7db82610e3fc7c19673540baad23bb..f18ba9696f64d647403669adc714e6fe5a6cca28 100644 --- a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png +++ b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4419d4df753d38564bba8c2f80e25cd296af75fbd33bec0d6c2291312d6f2d1d -size 770065 +oid sha256:288e3148a33e44a3eac09975fa1c4118bc3f1654166bf811a0b10e9f8340f66d +size 693670 diff --git a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png index 07fd387c8d2ceae5500db71698ce93ecd6b366ec..c236a5a3234f1be938a1403d32c616b1b40d63bc 100644 --- a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png +++ b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:005ef46c566ec7821b8a662d09579cf4c0cd46de86e335e382170cb24334c96e -size 738657 +oid sha256:3383fd2f5c512f81faaaafbaf31adccd9d6e04dd97a0dde1bf8f8332c397527f +size 1168397 diff --git a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png index aec2aa6a479fccc6a7a1e426b26bb1116b049213..01ce64c7cd8999eaa99ba33c3ad2c27b3ad90752 100644 --- a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png +++ b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b48679f1c596b9b52d14eab8f09b9e019fbc56c4300309986eed52861e78928c -size 770159 +oid sha256:a915a2fe23e00bb3272f042aabc59f664914c906e11063d3a0fc90c6efc82065 +size 893885 diff --git a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png index d328e8cf19d5d8019a3c00501bd44c6593c14377..e89390b9dcc8f25d4980490d1f18b7ed9aeda2db 100644 --- a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png +++ b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1490c482a6adf19c6a152a99fbbd06401abee50b5d3ed9d113a60047eedf8ea -size 369500 +oid sha256:20e91c51c5570edb6017b291583d51a03e4af2bc402cf00d4b7b8a6b533cab0f +size 1752948 diff --git 
a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png index 73f12b9b8b36dc25428227cea734fc3e7a1c7aa6..cb91e859d364d9db934ee80b18b3df66a2f4af20 100644 --- a/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png +++ b/images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb37630dedb13c0e7c30f27ff52bc77f382f4b9027701518810c220d94ca8cea -size 374849 +oid sha256:72fb0407328f6a7d89e24ea52998e10a290c41f5b2d236085a15df28b8077d72 +size 872860 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png b/images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png index fd049d12c0c747635583ed17e478b27c90268e3b..918bbb493080b772fe78a22953c7c3d5c0967f74 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ad526900e762473eecf5e7c0dd19e65ee132aa851a238ac23c4fc03ca749cdc -size 1397023 +oid sha256:c4b10cb1105591164130ef641858bf2c9ad6810fd44c86c51071821565a84e47 +size 1446574 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png b/images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png index b2ffa46e7b26bd814f37b1426b3ee9886a336878..f3429143bf484cc6d95be2c68c1039184e9fcc74 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f6091f4c644033b8e3bb996376ae32493d9de5d9146eb6f6e39d294bcfd68f9 -size 923161 +oid sha256:fa9aa986d7b4a50565e0afe86ab0ef082a4e57b271a3aa77c63cdce7836cc676 +size 1085990 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png b/images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png index a3760c86fe4b5e6c8f1c7a2d87ed93a6d105fe80..f3fa7add7092d7d77997f64d3e0c980950ae4b73 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce9b62d0683621be478bacfa1e45d1a7f6155ba275fb0da10cfca95022f9a27f -size 1271849 +oid sha256:13e41c0c9e5fe330b495d92e7038d43980caff3e2e6138cdf1b725c1983ef637 +size 1106080 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png b/images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png index e7062d8fd9a5ab0008e49495de3b8c98d5b507cf..d8c0c39e84d3e7d114670c51c65cec8dde04f010 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cfa704458522609b245116154f0000fc3e351003047e5024f9e05263b5b82538 -size 931596 +oid sha256:dab7005d8c8f4156c416d7ee7a225ec1f765edd1f3cb3c86a82fe7ceeb370d9a +size 924606 diff --git 
a/images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png b/images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png index 309441554eb2482d1de15aa75f22f0a9f4beef48..92f341f6b2195e52d28d83f7735f6486f65036d9 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96eaae6df215a62aa848beb4aa1aa235b581a31e6cb9f9017746917ef61074ef -size 921778 +oid sha256:496a784587cb57b8cfad465097188573dd095e8b92d2890f1025c8b1d2d502ce +size 911770 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png b/images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png index f32529503528d814e8d910899aeec586cacbc160..dfe8bbf3c751264a8cb96abac96b7721811ec0f5 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7158d4dd818aaa74dc7149983e559dc45bda1accfcb8173bdc1c22cf3f648bf3 -size 1269642 +oid sha256:57c72c6f888da56fe1f0e3b1da220c7a9c603ce775925c793d533049244050c0 +size 1091914 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png b/images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png index 34772c08dd5c0a46828716409203491a421d9f15..865c8ecbc8f80c0bf8e880ddbc3ee2746f8c50ef 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2e7236c45c668781aead096809188cedcaf8009a2d0f93c5a464225afc09900 -size 1442508 +oid sha256:12c33922505e7217a18731e61e3b024d9bf0f5d828b78f500e1c89540cd4ebba +size 1123720 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png b/images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png index d57b034505f39172f11cf0d724939fc6ab2fa8c4..4021dc60a59dc1d2e309e05b6f11101f15119ae5 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0748a9b27f091785e3b4fe6a145e802e3b0fd4b51b4061ea156a481d81a4c6e9 -size 925606 +oid sha256:51dfe15cf6c8dc37b89dbfbad0359776f877526f1e74f63ee8029a59438f7432 +size 786566 diff --git a/images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png b/images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png index 56930a136c3544e7f54f71c889017c6ceb90d9a2..3453e45aad7b93457f620e9efa3cff4a7e8743a0 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ed4b75272613dc7b13eb7fc9c8ae2c4cdbc3a5fa61c5159f4e6f523f697af93 -size 1771185 +oid sha256:2d2028e33a48ef4dbaf44d4285335815e6b2bb3d6b9314fe8238f2e7ab8d5328 +size 1514846 diff --git 
a/images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png b/images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png index 4ebf0c41c885adb83ebf23477307791d1e62110e..53d8761fd048df278a7d7637ba9e0be2fcd62464 100644 --- a/images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png +++ b/images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77d835f8b0f968693756ec5222fd0b79c8e04bd140762d2dac6606872ea38f23 -size 929549 +oid sha256:16f4af80e42b6e9382d7820cac64f1b46553e70b82e431cb09fe0c7f2eb27e87 +size 976478 diff --git a/images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png b/images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png index 10852180306cd39890174d8dfa5f60cafde4b6e9..b1ed878a64f9d221d66cc10e09bdf69ce7966569 100644 --- a/images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png +++ b/images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:426fa32d5bec1797d936a949af8f0e2858f3840060fc852a7f1294c7753eb10e -size 722247 +oid sha256:5804064bad4a1d4c25cfda8d7dc5d0fc00f26abc45ce3177b9f2f5e3fb962067 +size 775989 diff --git a/images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png b/images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png index 836f47cc20208c905102cd61755e9074931ef432..5a962549eb143e0e430e532aa8e05af91bd11ae0 100644 --- a/images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png +++ b/images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d2e010ce514c963850a7e7658fc258bb33961eeec83ce9021eb83f24474029f -size 778657 +oid sha256:14223bf081e1c8124c8c7368c414cff8a8d9df4dbb0159b0e26d87b1d2f5ae64 +size 629642 diff --git a/images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png b/images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png index 8e80f7d306390532b36c1a88b47f12da132e2280..49617b1123abd8c04f2cf5eb9615c21412cb5e0c 100644 --- a/images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png +++ b/images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a082aa510935e6ba1922798eec59535f141c737ce9d3dd50480e8c91d5803c30 -size 1550940 +oid sha256:89763a47ae03b4ee6f4bfb243bd9bbb0c1ea7977a5c81c22e9c3a79da06268cb +size 1647542 diff --git a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png index 462f83e7186f21b59a81f4efa1f9a6c33a36e663..7b1f5266c2ef19704bcef993875178621d59f83f 100644 --- a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png +++ b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:613591d3c6431535768e47f2f7fb09322e8f94202480bf2b07267c773ea84547 -size 2221199 +oid sha256:25ac9275b643d133daf5bbb3d9c27f0b63270e44a733d37e51efc3a7f040baef +size 1685065 diff --git 
a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png index 6c63c8d0b7aafd3abd5ee6d6ed86f0d5c6f0b7b6..7e671d958538106f69aba9dfdcb34457cea55209 100644 --- a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png +++ b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19c09889446329da3bb3549367c2b09d088bb377b48f6d46004cf8f60e192430 -size 913758 +oid sha256:1eda375d141efbb22470a0eccef0a2876d0b7423ac5d4ac5c365ced3aeae05ce +size 1785187 diff --git a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png index 886b9a05493c33842cc2485c117d4fb9ca1e95b1..f919e103892469c71d57aad9ffb05b6e808f4db0 100644 --- a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png +++ b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0655b0e6ec3afe0cf46272a411a2743a478e0eec6f5c5815ad1a796ae1dba2e2 -size 1001345 +oid sha256:f0e1cc896526f7b0ae2abec4428cd5713b74753db53a614ff2638750c32981f8 +size 1338600 diff --git a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png index d9466959f712620edf32d4042eb926890e6acfb2..0da537e4d64573decdbbc88939878708ae21fa57 100644 --- a/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png +++ b/images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e452223c6fb328d777b0623dbb18f6634bf2116c5d9917a5cbaa211ae17d705d -size 1001616 +oid sha256:ddc464457d380780f4cd23aa59ab052a602829a16424fee09c0753db690db14c +size 1219516 diff --git a/images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png b/images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png index 0008303fd9a9b383126ed325162ab1e9b4521df0..5be5e8facd4959548f729dacc255b518517f0483 100644 --- a/images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png +++ b/images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:415d255e9724fded644c1e1acca881c05564f207df9ff0df6377f0dbc914a7fd -size 1070622 +oid sha256:4d98df1b018d83c96a3e36372e85fa9e2373dfac8611629f5855c441495846fa +size 1913491 diff --git a/images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png b/images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png index 62b97979d5632f6aac4bfb16fb6850b60cf878c7..69ccff42fd6d29cfddf945c235d97f820cbaf788 100644 --- a/images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png +++ b/images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a30596a7801baf7bf97072f7f26d5aa6d57d7780c9b1a053347be878532aedd -size 1531478 +oid sha256:b86f147886beceeaf8341e66aadfb71871792a1aad50f4f3254da3085c2a5fec +size 2099247 diff --git 
a/images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png b/images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png index fdf49ea8853a2c86965d768c9cc3bbf0e464f3ff..3bb47c207a487dcc65eb49e2c8f7b48f1d74a66f 100644 --- a/images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png +++ b/images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d18a200a6f56be2c37d0f09a443fdaa599ab759f4ba88b102e9eeca5e3c772a -size 2079073 +oid sha256:f11fec388bd318135f8743ddda92d21f42423dfd5f5bab33840ad008f470cd09 +size 1096347 diff --git a/images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png index 3b108f16f7c7e7a9c846510c5ba6a8e103eacdc7..32b14d374dbcbe134bf7935db57aa2fa43fa6ef0 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f015e67401ca6b00eebe04b2b9de4bba11bee4f20e7d002470c10722554d12d -size 2023038 +oid sha256:e3964995aeb87cbab6a2fe642b8d1f77933cf0ca560432446862eab5462618e1 +size 950777 diff --git a/images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png index d89cbedac59df1e0bc464df9d0e55a74144c29d3..248273caca83013e7b9242b8d523f6a7fbdc16c8 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9addec78b25876009c9fce224f6303543450116642ce5f2eec47aed7538aa5e -size 1130828 +oid sha256:5f7b20b0fd9799e07ebaab741732f02a824760df9c76828a29cacc58047e2a87 +size 1153754 diff --git a/images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png index 9e01df3e051491ba91295960ff6c11487ac11283..6b968a162f71f3640dff6c443315aa05a31452a6 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:614c2940ec42f3e1b348f63b3736319ed2f4a46c2457d541cbe37bd3423510f7 -size 1264012 +oid sha256:64e4e891788b1699c62555e73c01292c794d21fdf351dee56bd5e28bcc8ae866 +size 1309031 diff --git a/images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png index 9870937d3a9ff0c822902388038268318dd3130b..af02a1606b78ded3d1b205278ab851a73557759d 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebcfc426cd1ae073a1d748df57dfda23967039c1c131971f4bececb1933829b1 -size 1493808 +oid sha256:8e4185fca596269824e5d74c781726860bfc87b3e297de2531f0521e66d56074 +size 1048209 diff --git 
a/images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png index 95d07e39be510ce2572e99a2f998cec64aeb2f46..fa0ad3ae3e0ba8cd2875ca3e5a33275643c296b7 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:575dd9a6ac370e84dd079c0e748b592450b18a25da7bc552b63b834e0941aa6f -size 2015395 +oid sha256:a335171396f413fab490e90793dc8ac873f844743d5506e955215d2cbe464269 +size 899772 diff --git a/images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png b/images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png index 81bd346c0bcc0f33cc0815a91a3c80a0e1b8d6e7..e9a88afccef71d8039e0dd3457ebf792ad0100a3 100644 --- a/images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png +++ b/images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adb8d423d287fe0bdb93a528cfbcccbfcd9676cb1b621a9a7eee4aef29201426 -size 1553005 +oid sha256:9afd9d2f40a92322e0a3240ad70f248378d2c6fcaeca352140ce7b804c2f1639 +size 1261936 diff --git a/images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png b/images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png index c26bf885c201b42e74a92ef8eab943a4e71a66d1..44811d04677063d295432173274dca7b8a017574 100644 --- a/images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png +++ b/images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c26dcd507cc710861ea3618523a96aa1aa6f81f59040ee4044e0477811bd70fc -size 848413 +oid sha256:aa57a8cd6296058b137f040409017f4da1b3fbf851a9abd2db8febabbc480364 +size 707192 diff --git a/images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png b/images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png index fce9c89f644149c44f748e580b75d89c542fd3f5..717a9ee330afb5b9997ed7131b043b15f7984ed7 100644 --- a/images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png +++ b/images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5af2c329d4cc89467f9a449bbad9751850d2348355822671d1d893f5ad6f4905 -size 1167899 +oid sha256:758aba5945239964cd0f7ad3fbf7f814e32909db7437e4cb76082609f258bf95 +size 1065172 diff --git a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png index 9427d19ef7959e10f006986145b2bb1401e2b26b..6a553fbd03975a83fc7ef395d3333c92e582eca3 100644 --- a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png +++ b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d824120000bcf26c13f0550de188e7d0658ae8d84b622ac5887ad331dd3507f -size 695740 +oid sha256:6588c60806009623385ee2735ea742291a34845f6b5f347ae5ea8c567a24ac77 +size 700591 diff --git 
a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png index 9338a629b95842d5e000115286123e3a62b5afb7..b722e295a06f54be2a4228c444e81d1788c61d8a 100644 --- a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png +++ b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39af3de5f9a1dd103836b5e33168acce53fd9629a51b19f39e4e20ea25998d87 -size 708915 +oid sha256:6651851d04fa0e79aebf534542355917984fdd4889f0347ee03eb5d66c2c595a +size 706032 diff --git a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png index 0bb5d26793653502a2f4eefab55ae7354e954751..8d7f3f014045f01307dc08f28829d28cc3f63746 100644 --- a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png +++ b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0262bbd1971f4488223b2ecec60bdb8697496e80f2bba82cd4d031ec96155c0b -size 1432362 +oid sha256:eb984cb4da7fe0f7225edd42a7a514d72dfe090bedd2b5520246e2e1937f6e6d +size 1353950 diff --git a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png index edd1c372348cb7671a8360c9274cd550a3b79598..bdcbcd4b126fe2caabb430457afb7cab5b98c73b 100644 --- a/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png +++ b/images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2121575be14de0b7b125d99463dd9aeb71c951b6a23c3f4d610ced0db9cd9ae -size 789307 +oid sha256:5bab60e052306935d2526d5de68b0672d79eb0bb3e08bea9e68b5d27fa12343f +size 700517 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png index 196732233673e423c98bf659711a86a4f31cfc49..365948acc8807eb3be2e4fdfec5826f8a9e2d553 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0881aa9647b8df655131cce70b13f8b81e65f441015f7e084e7610a5a3c84bd -size 428114 +oid sha256:a4a11c1d1b130bb9588e07810f33edf3cb28c51dc333899394a1f876cb18b2ff +size 359786 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png index 462c0b33b3873cb74e0929f8c745442b199f27f7..32909d84d12db095f3e112949a8b686452f5ad1d 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8919c65a446c507d5de2a46f1fe128259ee45bc3098d2d8d1b7faf762a875f84 -size 391316 +oid sha256:547330fc8147fb1e8c89898c2221187677825c2baaadd0ddea7a0c92f8b594a0 +size 263041 diff --git 
a/images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png index 87add6517ff73caadd0ec1ecb8d7ba35c232d738..defc1e33abab5339e4a5eedd638b8effc28cd163 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa90cc8dd451eeb851413c6341dc6c6fac2eec973020faf4c143f13ae7aefdce -size 658113 +oid sha256:a20fb5bfd7832ef34f1ca4c5a1ca67c6335b938d4af60d1ae0acd0b8b28c4669 +size 732622 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png index 2100867139992669e6b0a459b85675858e55d4ba..21faeabd6b00abe3047a37735e52a1831b820888 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3695a4b3dfa7a239759bd3a3c27e6e91e3b3557e971631a24180c5714b362cca -size 420075 +oid sha256:9dae9a4e672fd356c315c92fac6e9f8f077c85c8f5f1b424ada4042485298e32 +size 396198 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png index 2f47331adc5ef855842a92a4fe5ce2c2200b402c..eec0f03f23705d649be20435a8b504cf1fe21e76 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:972508043be97c377da218be11f3fbcee15eeff332f1a712db5054d5d6b31941 -size 507259 +oid sha256:f2be3d7c25cfceea3d484e883528dcb90d2e5b8c29ae384e150af584401c4736 +size 864988 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png index 3b8be0908439aa7e91024c452ffdc8ca53926c82..364d82e84aaef8360b2ec1538812fc67d089c137 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe9dc32935beca05ecb124639f3eec59cfce401ae6d1b9c131ba4d1abb45f923 -size 352198 +oid sha256:0d7721667307940c67b00c60f21a9db81cc1593e556339b00e308dbc1d2855d3 +size 377355 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png index b3798aaacce5a94c82778416dbff541c32901603..f38970414755961e338aa5a068f8830f6c957e64 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5429a2181c799c0a8756f29b171d0134d1cd34c6e180e06dbbd575e68e437d89 -size 488412 +oid sha256:e28a8db3c4f169ed0d45712fa955e39fa839e3e0b9914ee926ff6d344bb0f35f +size 587378 diff --git 
a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png index 2770754baa1e8af9195f00aceaf2cfa96ac16526..37d3335667785e3d2206b2b55ffc378b553e48f8 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebdbcf39b2dc46bb5f230afda7f10de61ea37bc47dd5dd8448d308c98ce4ad7a -size 695854 +oid sha256:eeb3191e2519a0c20f27412e06d317c1e4e0af71508eabbf56ae931a15e0cd21 +size 678472 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png index fe2fd3099f413a4a0b4cf7f1c48261d47450c6b5..0c7d009dfaee23be21f43881deeb0edae563f112 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:319378672679ad9932f910abd28aa5b85851f7aa05dc1f7b6f9ef8fd28ba0ce5 -size 367734 +oid sha256:738b5813a1a0dc4b067a6e480d14ca8e900aa2099fe17a1c15149174396994cd +size 869660 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png index 6244e2bec7fe6a449bf6e5d1a62afd4c71723f25..684640cdcb2d83737c7b1a2bdf77609367c2dbae 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9fc21eb8b35a2c9050034dfc53f139fa5aaa16d50ff998c2217536f600879fc -size 361118 +oid sha256:59ff86ffe0b3b963b592ab26ee3432868d6fea0799d1afe0cedba62d28f73549 +size 640139 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png index d56aca6e2e6628af8b47505b1425ba032790d748..1f01c6048c9c88685fe851f7d27227253dcdf7e3 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd84c191c08c12bcfa0a098a976d2c43157007735969493ecf597830c30e2cb0 -size 419950 +oid sha256:47234248bbfd43b43927c8ea848e2ce6a3d75d3552b1e97cb6efbe742769cb4b +size 887365 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png index 6ba6be06e7a15c4eb549d8fc1c529b4e35400868..085556ed5b5ad886217214963865d4cf7373f8bf 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b255924f08c6d6344f35efc273d1f1fcdb280a15f8bda829bdf4dbef09c1ffa -size 489461 +oid sha256:9dd1acb3a89bc24ef2be67cdfcc0cc1d19b475a301ea71a070e13e4d71d9803e +size 703453 diff --git 
a/images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png index e7c8bef1eff10d8639bc005bd4ff689c394cd2cf..b9342f835b91e3655aab975b62a418aa901e66f9 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe072da9d5f1f1ad7a88a494bab8e06527411d9895d64782d7b3dbc557e03b06 -size 547021 +oid sha256:f869f67e5d203a6627329cd3703bf55e2fe69098a8b024db828d1547515193e6 +size 864748 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png index 405ba94a0c53102cec8901470b758718e946a60e..3a3820f75bedde4dd62cfda4a9ff5c203c51341d 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28586868a44a4ac1175117469e1d42ebe90457cfef033d912fb4d47812caaf18 -size 395043 +oid sha256:c176d6afbb95795a3011847c88790e159ec07fb85a18b691f179640074b5b8a0 +size 442515 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png index 7daa838b83d77f0721af0e3d073177259ec49c88..c823e84a290ac7d6166be258bce532bbdcf15de9 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:912f5f6880944025bdc65e7df1ed216f64318333c98c332f3ea74378a351c634 -size 436279 +oid sha256:33f413027ed8b45a1f23d72bdcb5a38a97930bd7eac548b01c0cc75aa9bbaabc +size 456708 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png index 95ffc632db239882e292ea896cc8d56b1a4ba3a0..12908e2be6cee82af2713e1c7e7c143b49ecfb3e 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb1e7c8b03eace9e9489ce5a8e36d5d8536acc758490ad4fb25d8f18c16b2cc5 -size 420083 +oid sha256:2ac642dc65e8434a6479066c240f8eddec5632bd245f2d4cd9b3be379708e663 +size 991263 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png index dfdf2bba3393ea6a1c8a230a3f562eaea99cd85c..eef63d9bdd2e18f07f9eba44c565c3b41eb52e38 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94c9523cbcca91ba04d91cea730bac25d5bf900be3d98574ec6e709a651c0718 -size 428658 +oid sha256:cadb257f455cd555056d2fbd74366fa2c8c3056af31560a1bccf0206a258f894 +size 1012480 diff --git 
a/images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png index 0631c6a99a4539376ebd8ae6b3fb3e7161a9e5a0..0914a77a48bddb4b344f6eb321ca996ca2bebaae 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a606e9eec5d76be678aec2b5e9fc67843c2e9a928f63cbf4fceb3462492c91e7 -size 699805 +oid sha256:871ad7bb89f645dde8696573f6ed2fcd5fcad446eabf66e0147590b5a1a51e6f +size 1099865 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png index c4f1476e7010a71198171498a386c2ddfd4b1fcc..13aac02a785868df11baad2f72d21e603c4595f2 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aaa502047b381e3d376ba9aaaa4ef6fd06a33e3a3bf83c7d4aeea66137f31d43 -size 430666 +oid sha256:de692eebeecda7e2a03d8ad17134403000cea9df9590b00e0fa8b9c082e2b123 +size 982147 diff --git a/images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png b/images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png index 4dd2754834b9ba4cbfe0237a227b7ea665162fa1..9dcc401550a9878bb20934979ff08b80d133cbe4 100644 --- a/images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png +++ b/images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91068c30f8335ab6a7f09cbbabb474235ad5edb6daf4b905413bdebd7f4475fd -size 359634 +oid sha256:6fc2b4eb2bf468abe07c1900e2a86571749f5471f729b968f4bed02d2e6084cc +size 962912 diff --git a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png index aef40355dcaeb0792f06a64c4c2695511f3f7bfc..da6331f0e9d0a3bbb68c78ba29872b28683e1ff2 100644 --- a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png +++ b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a27323a2865f620128cb34bfa663d8c032e1db9a7d47917a7a82d34b6d25a549 -size 829795 +oid sha256:8983e197079f6f2cb07a24571bd876345d7d92e8cdfec5b91aa53bfbb67c388d +size 626651 diff --git a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png index 2addc7d2cad299530daca264c63af9a44982f216..b710ee6e437e7d55da36d091b9f522627ef3f532 100644 --- a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png +++ b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3883bbdf7b103583cb773a9d9248f482e3154e5788ea550cd36221020d69e77 -size 2009605 +oid sha256:dc462efc5814a68e8bbc22b6832f9089e024acf12de79e460068a37a79d026cb +size 786392 diff --git 
a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png index 08c5ee9a78ba02ad50a2e252cfb90988569296e1..3ba27dd5e8cdf0cfd6a748d40c58ff58457f30eb 100644 --- a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png +++ b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7c007c66ee992213caa5a2bf333e1f3e6d8d9c6cec80afdb3f4ff7a05e8c3aa -size 1041496 +oid sha256:7443972cc94da685febe1f3f6db419c6237e19fc83f2642009570aafa7229f4b +size 974039 diff --git a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png index c1255089ad41d29c9b90e25880238e889a266233..93487fa563eb3ec0a14ae063dd23a289fc4aaab4 100644 --- a/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png +++ b/images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec57529dcdd72711214cf547b065547a88967a023962ebc606bf803b82f0548a -size 1405604 +oid sha256:faa2e532fac4304dd060893843ccff6cd86358bb8de70836012e1e647b8c7b3b +size 1617242 diff --git a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png index 91f8bc8cc13a090c9d418b872512c993b28aaacd..ec455639f78a9e98b202957b07dd80d75d1ff3b8 100644 --- a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png +++ b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8be1b540dca2c17f35537e3a01dde447fad9504e929da6ce9aa425480c9f990d -size 2348467 +oid sha256:d22749b888fbd49555af2b29608a975da3bb963a576ce76afe37fbf098e1d04b +size 1590210 diff --git a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png index f271f618ab77ad9820a87a444dc40aafc4014fe9..f740b71b009dca07da095d56fffad26448ad3520 100644 --- a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png +++ b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94a344eed5fd569e037614a7d739dc54c12ca1c03ad2ebfa6a5dd7bf1994bd73 -size 936861 +oid sha256:dd2531ff591d372053d1faa3810b9d3a4d3ebef6c07e79b23d736e61e303a90b +size 819146 diff --git a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png index 67a8f57d3136d3e56f626b530d8ce220d9ea7738..bb1cd3252ef0fa55b0a67282efa232605076a33f 100644 --- a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png +++ b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5419495ae1462e66a38fc3694c75fbd9a43d0dc6fe7ad768ba42d0a8d7fe04b -size 668121 +oid sha256:420a93aaea9b9e41ce2b4ca4d04adbc1e0b45507d8fb59f25691530ede248e8b +size 732783 diff --git 
a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png index 3b8fb97de8fd3b4adf91705ce3f45502f2253202..7beee2f3c0be0b797467274a0a678dfbc6e1cbc4 100644 --- a/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png +++ b/images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a4d84516a2df7eeddabe1b58116191092125bba1409f836b475a0f68d8b04f1 -size 1246964 +oid sha256:5bcc98365222221136a8674b34286d9d8ca06838b97695c03e12dd9e47b80d76 +size 1111709 diff --git a/images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png b/images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png index 78b64deaa3ae0b79ba6a2dc5d5256adfc1098bd8..ea8dd20a092e9083c67337701f4e7a6143fb73bc 100644 --- a/images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png +++ b/images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe578657d9187358de0cd9f939386e160f427526fc5270035403b19888395e77 -size 2433126 +oid sha256:a0cd1700e36d218b48eddaed831f1807229f116efaa7352fc17225f0799eb6a1 +size 282854 diff --git a/images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png b/images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png index e527f1e00b6717644d5ed0128b1f56e414e6af88..57808f11a99fb55954ece4a4f257513ff2fdd8f1 100644 --- a/images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png +++ b/images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5db58e8ac082559f64b876566141da155f2f8fb94c8a4087ba4f401ea9c5321 -size 1420452 +oid sha256:365ba6805b874311ee86a7c376658467723363b74d195ea7a34183178030371c +size 1284115 diff --git a/images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png b/images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png index 9d8ecae7a018e2f1467986d8cc0aaf926ab7768c..8318c822cdaa4f40b7645ce97ca711298694196f 100644 --- a/images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png +++ b/images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f44e18401957935610822166ae7f49225fdd3e4f44f2f1af7a12ed29cd085a41 -size 1325736 +oid sha256:46bda9b1f00801fb65377d592f74410614d0562c82449b197e6af6480dc6653d +size 1230686 diff --git a/images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png b/images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png index b0ee133a08212059e18b8ec649116e80317b244e..35c0562bd77c303c6892094fc79d3264bf7b4e0d 100644 --- a/images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png +++ b/images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9357465d4bc74e7d52e4db1b9067a6b1d3401e82e9e4475bef02036c4026120a -size 2601235 +oid sha256:4903d63059f469747db92fdfa40d752d906b120a059edd64c0f0249c39b71be8 +size 2411221 diff --git 
a/images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png b/images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png index 1e8c7ca8771916fe4941d94375004607ffff5892..cf53df3357d7e495dcc5b5b10d31f98ccabe733a 100644 --- a/images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png +++ b/images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d2de9da9919af0050afb377961ebeca8213dc4ea68219c7fd18d452168960f -size 1289669 +oid sha256:6e99c761ebbb4ebe619ea5d1f87d306c64df652589eac04859e4665cadfd0e8b +size 1075469 diff --git a/images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png b/images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png index e6b662f60873fcf99d83cb0fb7622003ffa87ce6..84eebab5c0316273d44228aef3d2572ed69ed3a4 100644 --- a/images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png +++ b/images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:456aa095a9fa3c0a63f695acb79cfa8349ab1d616e3a3c90ca0f558b7f023662 -size 1321644 +oid sha256:8d9d6350186c2801f192af4c0744da9ca91a19e561c6357df2e9535b2dfea5e0 +size 1322282 diff --git a/images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png b/images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png index fb3914262965dccda9ed5f5a65ea3096690b018d..3247bfeaf7799bb0ba0ce0770a5da750d8a6c08c 100644 --- a/images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png +++ b/images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0eb20ea5e1b720e5b8fe88f722f23b9a93c06587503987f4c4d3d99b40c8ceb -size 2094038 +oid sha256:7d5256d5cf66570c401255126dd93a94917374e8373ee201cb86c691f6108dd0 +size 1830093 diff --git a/images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png b/images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png index 5d38103fceba73e893d4bfb5c9fc47f86b450443..5708dfbcbe1e93c2f9366040e62cea6cca8eca82 100644 --- a/images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png +++ b/images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f657631d190365073167bfd0d59ae6bbd0c41c54a51a52f2755c6a995b75265 -size 1823063 +oid sha256:435f5f89d4086b5c945743f7b48e4ed1aaec0966392e91afc41a6cd04c902503 +size 814058 diff --git a/images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png b/images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png index dead4bf83b5180b49d25661fed7724ae9757c715..ef4b200043ef8884f3817ce2d610db7a8b69d320 100644 --- a/images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png +++ b/images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb72aceb285b86fb6e1a212489b397598da7a9638b0c7123286d855d016981d7 -size 2092448 +oid sha256:cfdff7b5bc0dfe26ca9e85c61c15515ad7bbe7833801b9370ecbe98b3e358361 +size 903559 diff --git 
a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png index 382fe06ae19742b26b0a50be0bce64781bcf983d..402687d53bd7b3b5cd0a857365dbf54481ce38a4 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:588bb55fabab0948909863d7df1afd1b1b02374f200bcac551d76b817ed37c55 -size 585638 +oid sha256:b4ea5ca4e6f1bd4c5a1a162b004ae9164b1d4aeb6886be758ff186eba41b1a7b +size 765175 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png index 7b247e91c70ee876869774e5fcc42a30be4a5f8e..3be7fe5e8c5ad63a1b8ee476e5845c69083cb196 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da355c62df756613d27fb5c335948e66d010a406c6b5e5ec658978c847c1719f -size 735168 +oid sha256:706cb9f1c8badfa5ba6fa6ff632e475589b05cae4ea7c6ed38b46070a6c9a0e3 +size 426312 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png index f91c420bfff2c8fe4f139630049e1abb4c80dffa..c45752700232bcec618fe62b5f6fa39c2b45ecb8 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce54c20ee3efe439717f3a5c2f984fd28955dea198e0980da63b70f99431b791 -size 544769 +oid sha256:74412866509e287250afb36630e11a15344cf7c2da450750453917fac2d66a6d +size 460179 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png index 6548ad63093ebedb716f3c836db628991bca04d0..f8c23be39bab6c370944fc45e0a033577cea5b2a 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1469aa114d34d447c2d669b62cee17c235cc939e77a30aa97ab9430d915df478 -size 650762 +oid sha256:808d08d26622e640a751cf8ab6c47d1385c6631b491c0a491dc3db395f487307 +size 455174 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png index 2841cc1b42a1b704d6969b489f0a01311f4c1df8..8d001e92270bfcf23806f621620ca523be84d8ff 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9b97747e8c232b080a4808c349015e0f62245b4427b1cb269f32056de317a5c -size 642771 +oid sha256:7f08bc89bea381af1818a81751ed000745d5399b028d9b8da875a6c32f3ea569 +size 736353 diff --git 
a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png index 292f74d3b336dc2244ad3fcf56fd0676b242fed0..115d3df4b3cadb08eb129e15eafc1d350815d720 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:209493c58be91756ce364e25d8119b6595475efa7ae497ab650aaadf4960edf1 -size 600511 +oid sha256:1a1cc1b57330d4bb4066a3e04b5e85147ed4cd7e52b18b524335d6c018ca4c63 +size 746658 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png index d7ca8ba1148f31b1d54a72825c8bbfcce09a2f63..ba522e35e9d2f35a4f3fd23f715d6aefa8b8d8c6 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d9e8d4fef9d6f10e065cbfa2be3e39106da4e92d4b08c464209b2204975ad48 -size 584024 +oid sha256:5d390d70cdaf26a5beea2ba9d5e8688ab08c253c3d4cf30199cb90fe59843cd1 +size 649785 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png index ad581d06117675cc690bcd73059ce73ac8352ea6..fe94a2ac83ede1e20879834c485fdecd7cee6b04 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2169e58d92e988cee2ff005d87d6e55605f97565c1fd53923688a5502af9a154 -size 583843 +oid sha256:7a3bd4487c5065a6911cee5540d79e70fad788aa61d860f2759f069583af91ae +size 528616 diff --git a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png index be088f86267f2e55484348616841011a7d54bc98..789359b8563a53206c5d31f0b6dfe019c78b3909 100644 --- a/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png +++ b/images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7886189d67b06e6c40e3f45117ff9eef4712bbe1589abd9aa2c7dceab9e711b8 -size 534328 +oid sha256:b87343996dfde34115d867d7ccf7b52fe700e30ca4e22e81a5be6104f5ab1c0c +size 529097 diff --git a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png index b3235e590c09ff23ee65ca02509b59ed5710801e..3d960da31ff0d3693e42140feffde20b34b3f644 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57e4bf4dcefb363a57ffb1418d2d3e5cea7b66b4e0c4828681c324ac2f056c72 -size 1532815 +oid sha256:01bbbfcd03019a6bf82eb2bf633c6af9b8acbf5656848152754b9a2c1ccea9d3 +size 1590087 diff --git 
a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png index 1d009be2d912aea8ba6a3b2118a627569b9fb7e6..b506c82776721663f849123af5ed0043c8c88d81 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:628b972f77498bef115b739087c0ab541e660b71d79d311c01d24d511e6b9f61 -size 2958436 +oid sha256:93c34ac6f7c4304416c3f04a446c8b8a28971565c10a363e79a6faeab1caaf29 +size 1846649 diff --git a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png index 0d386c3c9e8f2d847e790c5de8670f0e2a5cf118..6ea683485e27c5da74104d94a4481e4973714926 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af41e8081e515c6da976ee51c44ea63feb3dc5a2eb746d8fb3524b94067b1c48 -size 1532818 +oid sha256:d26c4a3b0c285580cbdd68ce406748b568e6b2feefa741dc51dc6454722bcb04 +size 1751104 diff --git a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png index 723afa3c3b5f70e5375973c6dd90693c6636e235..7d2e61e74ceae4d901cdf73398b2d7d76899016d 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b9bf6baedfb5b70f8922639b0bea242100a4fa948bafe071a001644b33e1809 -size 1532443 +oid sha256:5507f0d28e790b7e98d0d0ddacf94ed0a8cca2330cf64aacb96294aa199358e3 +size 1785119 diff --git a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png index cd35a30ca250de43b6e529009420f735df6a3519..19ec7f4480f8b6071ab21478aee33339937df65a 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3abceb7453f8675086a85a5f73cf37a7dd33eb4c786d9a9a1c672df2b34c609c -size 1497305 +oid sha256:420aa79610046cceeb30e433c417a059ddf1d5f178ac0b11db050d4390470b8a +size 1562224 diff --git a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png index 4e24407921d40f471e381ea39f19288979d24886..8d8ea7660f31745d743005b64687a395e3296ba5 100644 --- a/images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png +++ b/images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33254cc94075637cf90f8433cd5583e857d0b6c4032f6b63349b186affdfa005 -size 1542435 +oid sha256:a362d111bd7f95c9f527a075ef6353db4e9b9a73955188fed228ffb12a011ba9 +size 1592931 diff --git 
a/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png b/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png index c47e3c6f1549ee9f22b2cf6d243b725dcafd2a90..cfa6093e5d7c527964bc67fba92a0a7a86db29a6 100644 --- a/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png +++ b/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3cbe92b7d67c658c97ac873159b3cc250a6798e3aabe59d04950acf5d9c8270 -size 1372374 +oid sha256:14e47d646056e0210c9450b75f578787a26bc03bdf461154092423ac66bc2cd1 +size 1180738 diff --git a/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png b/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png index 48ac4b13b2f036567bef0d9665271feee11809d0..2200a7a715209f99468b33fe6a1e33617dd8d2b0 100644 --- a/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png +++ b/images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ebe21803787016ccba15958818f60ca71219e19d09b1f695ff10708dc3e9b45 -size 1043100 +oid sha256:dbac1f6100265a1772fec4f9d82432987afc1a10d5ecb25a807ad93244eed180 +size 932261 diff --git a/images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png b/images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png index 7e009b452db1984614e214efc72baf03611cf73c..ff1404bb02cbb8cca9c1be8ac93a0216724288f4 100644 --- a/images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png +++ b/images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc2c079db0b18a9bba0b0d169d2844afc4861556b34ea9e0c7c3798d16f85c49 -size 882134 +oid sha256:ef30b6f5f07d915cad5c9214f203a6c2bb74f341f243dc3b9a22adda3cf9ac88 +size 1417687 diff --git a/images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png b/images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png index 375997f30d0318afa7620a938886e3f3f767d468..7fd071ac16b0ef53de7907a08cdebe45b1765f47 100644 --- a/images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png +++ b/images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8a15b2dff597acf1d967a7c0d2a3206de57f662f18a9137fcc359bf395742dd -size 1932522 +oid sha256:b27a50db1da13746785f32648bcc6a0d3709a2cc43518edd93820e08d6185967 +size 970947 diff --git a/images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png b/images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png index 2ebdc019b605dad98e213f7914bb892d7b9a28c0..49fbc0786be19247a1db9b0a86a57d992b2173ff 100644 --- a/images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png +++ b/images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d7bc633338ee2efd20024dc7594b732ea259a2785027e5adc3366b91aebb85d -size 904210 +oid sha256:560684702f52dd223b6f0c831a2f551f6b6f4679a41aa968d894a3e473e4c012 +size 1015982 diff --git 
a/images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png b/images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png index 5b9372bf1c558dc65c66492461c250fa85431aa8..b2782f5fd591425710a597729d70df2c4d2f2224 100644 --- a/images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png +++ b/images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f63fe72311eb0ecf8c07d60c0d4da5243ffc655b46a26bc1acca484e74e60fde -size 896705 +oid sha256:9746ff05be323306340bed1b8a57275f3968ca0144cabaacf46b8068e2c52139 +size 988858 diff --git a/images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png b/images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png index 56ead3105141a8a5f86f90e33072d2c892ed76b2..0948e02d22eef4c9a2bfe58a9ff71e4a37ec126c 100644 --- a/images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png +++ b/images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5603d27791a1fcc780b6856a6f838cf4a1e1a679f673219182eadfd23d18454f -size 2347462 +oid sha256:7dd422839708518790356468f90ad70709263c73e87a23a4fac661a78f13374f +size 1946516 diff --git a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png index 62d1e4ccc7304a9034e1d7831b850b80b4e117ba..c2b6e229659b0031e59889848a894f4679b71736 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8569977c5933c0c81f21e36eb5ca9b9fd61e64b8b3bc77542975320af759a4ad -size 416384 +oid sha256:5a12866c42de23400d0af4923e6f8ce3054000e6ffbce008b652b96d267c2002 +size 559915 diff --git a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png index dc653a59081b82a01fc801da358a339fd79ca617..7fcd804c9e4c91786492fc400d7a9f81ef1d7d03 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95cb590fe54c7120099aef3089d62f1b8b7709a094057f38f0def88f599e5359 -size 2165352 +oid sha256:7fda7d6924048c225a737281c615ae82627597f853195340cbcb42417c730866 +size 1491083 diff --git a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png index bc8ccc7349b64f2379f1e74cbdb2e33f3eea1f1c..28e88948ce00b9f42d86302441f7a849e079202b 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:335d4749866044d5d81935c31a4f40a42050ced498f40889f2728bed46f9ce47 -size 1152719 +oid sha256:042a02c682e05769637696ab065ce5dad3f7f4c3c2737cd8449dd69385ed68b9 +size 773766 diff --git 
a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png index 56a52ef49a477db558e2fc53f6f966d8532cf211..12c518d7315e06dfc362a09fd6b543d349e16368 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e085d88010d4261dc3d1ee507efb6bfad132d0b20ce9b7558ffabb5daa1785e -size 588036 +oid sha256:e39d1efa610ceefa7d6a85085a4cfefad8d3924b9d34151dcfe9a3c7781daa03 +size 877066 diff --git a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png index 392a584b6d894ceb3f9de31f0be13ec6777638ac..154148c7994a36b375b07a3b1df986aeca857ec2 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37a4976bb3e2fe905a250ae590ff01aa050d6fba67f96573d7c2467b44fb7400 -size 739213 +oid sha256:2a4224676fe65eba515575b5f58a9ea4a4ff81219ffde824c7652eed8f039c7a +size 734142 diff --git a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png index cbc7b435e6e820c2a469371c8415d3ff195bb311..6f878d27cf082fdcbbe5aa5148f4cc943b32e27b 100644 --- a/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png +++ b/images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:436639e1019544bd60da9944bcd955baa48902e66c39c134fb091416cd3107e1 -size 1288355 +oid sha256:1772f043585f08457e790ba1e1d59eed85bd5bc0a67628472397409adc8a53c6 +size 1724679 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png index 6d2c10abde7319809318d96d4834c7fd1835ebae..04ee5748db815166e1292e1e069a9888c3c1f39a 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ee134af21c0653d092c06c9808febf80968c39000d9b295deb11f669618fcfe -size 944342 +oid sha256:c314e77c09cf41168461b87c9d06b987d1803bf092f65ea995b4bc386f4e5ab8 +size 691445 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png index 2a23fec25ba67dfc1a64f698005d6bc320064737..f66bc6cb9c98a4b006a7bbca2846466727446117 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be817bf63995a1afae15a8596d6a4382b05804499ce450b08a9fb18047d11ee0 -size 567139 +oid sha256:95ad6d3536e37f7baa87def3055452e2c2db22bfe27d91762d4e966245970b26 +size 540614 diff --git 
a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png index bbd5b91a41def5d67095daef20124d15df21c5d0..f61ea6d299e32771ca3d1a402b3de670f6eb4048 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b1f1bee658cc3f29e87bb58478d0ef72a201f41820a0e8013a9a2e37edee184 -size 537484 +oid sha256:d9224c31f40a9252da35ba46becdc8b61a08cc1fd41107af06d7ed24ce6a7025 +size 453172 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png index 801ee8c2d064304c6b323affaaca817d3941081e..29f5d272208ea61c7d770bd3a392f4ab7bb858e2 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8fd648280bc4a1b0b76560244facee25c644ef3a19e56abab6b709e88f286b2 -size 560199 +oid sha256:ffc54e1529a2048d0aed58144f765c914ede3868a7d544b54b11c9bf0653be46 +size 525989 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png index 4af62119fa06082122f464285bb68f1f836ad1dd..e279b55296b8e2ac5e747bf814d8ab4465a64212 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6314dab4da04affb29e9380e18a49bfef14d638e57bb8dcf755380f22a9324b -size 566606 +oid sha256:3d4180e999d8fd143b5df06c2d7a62b05668da848a003a8f9ed7b289c6ddaf44 +size 489474 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png index 7dcebc74d9af6270badc7f60c8b171f04dc74927..d0eb609d98eb8cb8f515229833c39c45d24fb253 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a57e02d97a8cdeb7f04e6760eb8e5fd13957b91fcf7d4ace7c17bedc0073767 -size 524874 +oid sha256:8143f7be6b1fc680699eadb9b48814d20b6f7c632ff38dd08ef78d6ba8b9e36d +size 487333 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png index cfa8e99da5b7d3a4ba6ae0458f51ba41b96196f2..e54e84be93f581e3fcf33ebef177144b46daebb5 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42f705c04208e4535b853c9d4aa994b823757a573a9e742f37286e92ed5a68de -size 575002 +oid sha256:5b32039366e65cd92b6d9098d7b14fa9ae225a94120cb8ab1c84a0671a44fd37 +size 655419 diff --git 
a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png index c04861a874f5a20a830ef3991ea7980a20ea9a2d..523564021a4783730d5439c6f2622ad0439ae2ee 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcfe97cc420fcb8e88d8278c1dead4b3e47982ca7198ce9ff465c0a067d1ac4c -size 570856 +oid sha256:534cb63ac4168c68b421aee01f80b4a4bc6dc73f74a872e4a9cffeae057142b8 +size 369317 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png index 30cc7c218ac1c1a5920d3b9316d8b1d9a9465f48..8275ecae220dd1385cb06494f1bf2607a1dfad9e 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3384c82037fa798ffea7626a1b68c609cf31addb3a2e12301d386a306f4cb633 -size 2644012 +oid sha256:50589004cb985eca42fd384bd5e65ae3842be36b75f383ee907e76504d07b3d3 +size 1354643 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png index df461b641844feeef586635117aaeda0a7479e5f..9e05af1cc1af11511358282147ff72089e52e192 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07696cb3011a5df792f13b1bf5b664a50e3e084704aaef8f766cf715bacf9f8f -size 559266 +oid sha256:57ea07dd1b6d62e2989b6ca04047825a26f9c39575244495505113bc164e9815 +size 568167 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png index 0da4d883db18dad86683b7a90ae8ebc1ce9981d3..4b8efd2b9925cd597dffc41900d8710f8e765f85 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5681b725193bf4bfa7118e38046ebb0bdeda9b5a9ae67d0c8575b5960e8f824 -size 547891 +oid sha256:ac527a2fdd916c64024fa6ee26409c9459156967a1b4af7e7133ede7bd0b5160 +size 503269 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png index e0bfb70eaf2ca229a636ff20101dde4641d625ac..807a3142bca76b97ee31e3a54c0c4186333e7eb1 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa707be3a62365e07b4a32c42a65be173140f84c5b039da5ecafa5655a4bca49 -size 569471 +oid sha256:d7bfc8fe0c761148f3751fe4a7448152f3bfedd45c6ee5d661deed80ca49e7cf +size 557169 diff --git 
a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png index b9116a1a82577df855f9859103476af6f81ead0e..52e92fceb6ad34c1efb84a943422db710d2d7634 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5713169f2451c54692b542f7c8a2796853caf4afbe4101d999c35a4333b83ca1 -size 590028 +oid sha256:ec5a9a5fc264604332bb7716618ccc2a2ee23798352d90c44fb174ed9e068246 +size 409988 diff --git a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png index 911b94634d620514e7047cd285136e42b774779f..44fe9ea885cb9af135afb58a201353cad03ae754 100644 --- a/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png +++ b/images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca158de8fef6a51db8c8071b1700b55bba902c18b7e06dbaee1cdad6480df417 -size 614484 +oid sha256:ad6e8ff65076ee539edf26504ff329adad6f6e16c55dfe8601c1ba6b9522fc9f +size 1401019 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png index 221504de22d7baef87c3df94daef9592ea0a7471..d2d7705256100cb4887480c757a320cd3d717124 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d74eb3d02d5821dbcf09fc2763b30e822c3597e6bc57f37b5477319ec71ba377 -size 2205187 +oid sha256:a92a63b19ba37fe2fdcb15e62e3fcfca95c737c8d54c21fa3a06d37bf180a718 +size 1861375 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png index f502b7c45175028a1543d10712c2f7b7768bd3ee..8b2ca58b508c02d07afae0d774c60f0ee53e5eb7 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63d80c1a81368934fc91845b6c90875ed2adcfa81b63cdb75fcb8e5e738280da -size 2208503 +oid sha256:a88c686b87c3b20f2e30428af00c6c0c956af201ce794276fb4cb26704d86c9f +size 1679690 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png index ccf2440099023574233ef02e042192d8b043b9c7..29b224db3db9d33c1ada6d8d420c01ee02185dbf 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b3b26c999b4438e13d6dce473b0e4971d542d020d122280a6eff8ec11197abd -size 2089455 +oid sha256:efdb5e0895e4fafa6ce95c064cfe28e908408ce26ecc20e77d843ab1b7327f37 +size 1539693 diff --git 
a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png index fb6cb6f6fad1a4031f7d20db7a7a82d6ebc92710..fb393189211d26eaabafc8fd3b3fd7b6cfb632f1 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f51b08250861eb4f646b01d377bb7c36b3b8968308be846ce1a39bc83f7519e -size 1741470 +oid sha256:866d23cf8683120c088a842f129bb6df7da80e1491612877a7f1f261746f0ba6 +size 729364 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png index f46b96092a4ccac92ccb1ea6ab2b0a80e03d4ca2..b6471814eab72e6d670988cf925e144071476a3f 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80f1dadf2df1acfbfb51940d53e187f568d7d474cbd6841dda6a79725e887378 -size 2297397 +oid sha256:373e17412f22e6839c4624002581da385cb0ef34e8b52201af6c2de82c8926be +size 1064876 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png index e4b3d0432f3a14d0ebb803a9bb53375e827f6da8..c4c94af7bc6659b3553b902c7e3cb534a0e628ea 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba0087098ce54ef606e8157151576926e83431d6691e12a4a2438354de951a70 -size 2154225 +oid sha256:ab85ed6a94cfa8350cb7528e3194a6ddc1dc4f370471d827b4aac6ffc440959b +size 1591790 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png index 3876c1f9fdd32a6e97129cabba1b5cd29205d420..f2172ea0ee382fb4d00daa36e8bcf4240d2b7b80 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77356e1fa4a28e6800cbbd85deb790234bc46e2e8d217e54e23ac2f654dead19 -size 1600069 +oid sha256:1a6db4a3571455c3809503ba1c7edceec084c7cc742f270ed9b01b6da0d3b524 +size 1609788 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png index 3c0ffbc75b4b847f02cef75f65a7e92d8f60f5ec..019dfa67832e402f0a71175ae9e9dd6b998a46a4 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71a6061dfe610ea430243696168a0b1e049f35d26d4e2061e85b5a802c3302ba -size 2242635 +oid sha256:c773fc5cbe6631fc28584688f804587460afed6941d91fa650b1f937f9b1feda +size 1525555 diff --git 
a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png index fb5ae7a2499fadd305c289731d946e5631cae4e4..0c8d407dad5250f4cd8cc99308e05e52ebe1772a 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba4ad9394d65e9c835c51c45d99faf0cadfce2eb3dcf938596600731ba4d4180 -size 1374519 +oid sha256:213579fcc34fc1d13cd0fc74feb15527015378288afcf7df78afd25a6431c294 +size 2185335 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png index ffb63ca0aa2602e8afed51f01c3473f857648e27..9fe3c8bc7cd8dafe25ebfa688484c0ea02c7b417 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b33ac400aa234e09dd4fea0392c47b8640c8890b04cfc3a501100d9b65995d20 -size 1401371 +oid sha256:ea44909b93de7569d81444ea1978de67abf19ac29f6147a955ee92aa4551ae72 +size 1553660 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png index 73e00c749e3864f4fc1ec9e07db7b65b9c4fb101..14cb445953f7050fb430fcdb6d40b0f10fa358f5 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08b986756d206795e5de12b5d4d35b35b89defd29798179041ec79ddd547e60d -size 2405814 +oid sha256:8272ef3a292373482fa5ca43026ca82033890f88cf2cedb77ad72d5ade914fc9 +size 1700619 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png index 908dfa9c1ce0f77d80235ce19a95365de204b1b0..d42239024fbbf711a48a1b96ae80a501dc20a95e 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c294b595db4cda842359d5f2a29b069f0d66bc2d023e0e8c986be1fcaa393de -size 2398381 +oid sha256:c266ed686906f027195798bc29472bad543a4de9b952d11cc3fd4895de81b3b8 +size 737507 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png index 23f7d27de50e634ca52c1473fb952ee069fb03fe..0e5d29f9ee324682fe2ba5c9811ddb732f105c47 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbba97ea20d0161df241aec637d26b4c0dee7d371523a67cf76c1e40289849f7 -size 1785440 +oid sha256:e13dcf4ca2d58287d15265a872c1e4e9582467fa5b1c94c20276acece2afb34e +size 1423163 diff --git 
a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png index 0e1a914066bba3a0b7c4d7c9339c06e1f404c6f0..80cbb51b434188af46a5285b9a51af9334150479 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b1026e8c27a3b42da870b27302fea7b4ff437abc1414fa1acb6687f937666d0 -size 1871193 +oid sha256:e9dd37b3e10752f00324b70278f2e41cbeca8c7d8fe3e75f17201920400a9168 +size 1815859 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png index 2be99f98d51f3e239c669d3cc4ca50845658950a..7b9130591ca4535a8f0b78070f7c4fa17fa03616 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:444a7428d89d59311ca1e4a18c87ceabd18f33b03861a95824cd9e7f993a5584 -size 1715965 +oid sha256:3a794643314026179d3a84689644f5cd9a7f64337dd168230ee763a892c3d52c +size 1522418 diff --git a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png index eea612ee73f4e5027b72c3f378e6fe0d21568d4a..346dfbaff4aced25e99fcb39db41ba5935e76a16 100644 --- a/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png +++ b/images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac7273d707cb4daffdc5174833ac2ad55cd7b09aae01ecd24cab48b5444277db -size 1510442 +oid sha256:815662eb127f270f3397a4138e235fd6c85ee982d650bf3fa035f8df7ce14b08 +size 1582901 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png index d2fd0a4c5e1d60d4b1a2a3058cc57e428af771ab..541a973b5def2ab005a008abce8109c2cbef3ee3 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e0556d58a0d6325555699ed07eca96c198bd1882b0f5bf56897c7e3b8932928 -size 1634799 +oid sha256:39055becd067fec4eb49b9b9223de4c219d60673bdc2e12a3e72994a81e8d67d +size 1305462 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png index aacd1a15dea9a615bd7c7103a315995ac48924ce..9ff8cfe67428bb0b2899af9ae2f72ea1676511e9 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c695d0521022ac30ae0ebbf7f46485cd8ca649274a2c317f4b05bc0cafd77895 -size 655841 +oid sha256:1e46bffc39af21f69d60d300e410fc820a3c0a2961fed08c9c1397e6e0672992 +size 418370 diff --git 
a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png index bd268fc394b230907cdb7ecf9a01ff77c0e54f9a..7916bf96ea88292dff0b71e12f8c593b2a99bd47 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54439a4ab6a9ea430d54e9f48b284b77d28d98a127600b826a0702fe3ed3e6b3 -size 785285 +oid sha256:26a1a2959cdb762f7be39fb698a6305366c704cace7f9100631e7591f0091d55 +size 412766 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png index c14bee0e2dbb54135f8d6c5fb6c6d8f87b2e906a..8c5c26d6cbdf3640035239ff215c4d303bf82b26 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7404c6246578ba563678369d898c5b5d0989ffc57e3da5f7b4a4718be4e9ba4d -size 505810 +oid sha256:76591a2aca60f9f4cedf77462db3e56ed6d6d635f0907e884d25529594ec3479 +size 735741 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png index 0335f9e081b7fff8186836b0d907b26367aa2bea..fa4d69ad39a39e9d0c29cb19fab2295fce2d5152 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59871a65126beb0c21232ab3abdd099335b19bf6ff0fd458d99238ba1967047c -size 772680 +oid sha256:ed06d8bd9d06ec232aea50a14b1e438b8f3cc3aa42d7219dea0249bee9d044c6 +size 456814 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png index 5fcb9196f80221b5b51f8a4d65ef3078c369b23c..8b8da9160f8c993daa03f35e7f3764642ff913fc 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3b0b4652c3f020932101ef52c1e6ccae084a229440fa0ead7d9c2873e418693 -size 1758665 +oid sha256:1f35f41fb5f5304aec4da49f93f9391176e9cba3fd4df57f7d1fb32199befc80 +size 1150046 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png index d6d1959fbd05bc38c11546f17d151fc2ba19e6b4..2dda2141f70afa003e946c0d848f5748e641d613 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60608f8c65537692940ca5105315491a7844f7618d61e0e1fc8d6e70619b0aca -size 895410 +oid sha256:9e34098041ad665fa8147903e2b4e9d522ef760d5fef77c779af627d7745dfc5 +size 1415214 diff --git 
a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png index e6647fb34802888d3ece93e707cae84dc781eb61..4a8a358d72b31a26ba1f05107470f6a4d491ca55 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c9e16a23fa227c869443a50a62f6b5ae330e51e8924c5ffc604c57ede347ad5 -size 1486406 +oid sha256:80669e2695b081a6c98f5e3d8f2849e9e48500afe01af4fd39b2a8e4b4068356 +size 443340 diff --git a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png index c0058c479d93f6baa5ab4b5cf1b49a03744996f0..b77639f3a05f14fb9fda305c3f31001b5bef2318 100644 --- a/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png +++ b/images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd8e5e7cd7254ec3e62bd5bbda87f884e61d473824f0ba35489b4c2ace27acf8 -size 628182 +oid sha256:4a765b0134ae31df361f012b115847f98f95e73b0eb39b355c93d03ab4512355 +size 592515 diff --git a/images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png index d4255247fad74acc684faa55e0097bad8b8e5353..cb6c25bb42cb3429be55e247925508ef43a2f5d0 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb252685bb1c494dad7bc4db56844f2ed89bc78d90fdbf60d9ade8052a00ac7e -size 731924 +oid sha256:75dcface6c1c9b9d6c6579a26be50d75d5f63f747254349baa82916c42c6ddb0 +size 1034167 diff --git a/images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png index c70aa1337580c09291bf171cd720462ad4f74b76..66c3c61cf0c985e0371f4f2883dbb6926fcc3828 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0631af8bbc3248d62a16240755ddc09dd9489965399016fbf9583ff355553ff3 -size 681322 +oid sha256:bab71e1ebd2d890bc8b0cc71394a797ffc9f7b9455c66ebf47f4f8e42ebe1e54 +size 624092 diff --git a/images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png index 63222407f074c96b33d80f4b99bc2dc4e5ea60c8..b03ff1800985653545bb485d6f7cbfa7c2bc152b 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ece7fecbdc40529486d5633cec6bec5ca973d6d031dc5148b95672df45911f1 -size 680408 +oid sha256:52f2c063b933f0742edaf97687c77064170c854a36106e64dc7c4ee31036fd7e +size 550167 diff --git 
a/images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png index 64bcfcfd58e206f343d0c0a763656845de68c9ca..bff1145d70d7fd2986cec45f305bceaba9635f6f 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39fd4d47b4c0ed70f8b92f1d18c593cac3321bdeb40e19b3e17604befbff198b -size 929164 +oid sha256:856f6439f99b05e48fb9a66e8d8bab3c28f16e32d088a917386f7eff7e8d7f86 +size 996290 diff --git a/images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png index be3c6eb73acadbcf25107af5c7e6f04a55295401..d1fac7086c939645dc9744383c73e9786d8f253b 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b8cc18273e57902cdf3c73cde040ac03e8b4e76eff2b50fb5dc09c63fbaf0e8 -size 1954443 +oid sha256:a73ec853be47d93ea3e1162e1f1ca7a4bc05ffbd6645e025b33b8f7fe538d189 +size 1289026 diff --git a/images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png b/images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png index cecb7de4ac9aa7ff6470f2a46b6defab58da3c05..d1afbfd127996a561c665e3b2ff81e07b7ef9fbb 100644 --- a/images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png +++ b/images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e01a2f9c76a029bcfa6bc57b087717ff3750b8c84d01510ae00beef706db2d1d -size 648906 +oid sha256:10e64f54a4c672fba21182ad0bb892d385b87794de36559122532429f3a81f41 +size 1044169 diff --git a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png index 65be45fd2bfcc416e187ea7aad8ed8c940e8d9d5..9944c9bbfdf4ac6625a77f4cc8b87a3d28414e9d 100644 --- a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png +++ b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b611e60e750e2c868970490fdb5c544f8567a25aba03ce24636defaf164e7ca2 -size 899454 +oid sha256:e474028ef635328280ff6cdeecc2ab99fe498cc24f306c2e2e6e2b29b9937fd6 +size 1069630 diff --git a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png index 1b6d4486ade2dcac7d4062a0193de2413bfeffed..ec2b288b586c55df011133eef9eaee2049d866cc 100644 --- a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png +++ b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba8d766366cc7ba481ae6a89f846452704a2edc3ea8ad2c06e6e55e5374be4a2 -size 444332 +oid sha256:62544474e03b1e7fdc9b0097a98d51ad960209e19382fc6c695e74ca8fffddf1 +size 328597 diff --git 
a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png index 955ad3b3459383e0fd49c6e4e928f91ae6d30a83..e33ccbb0eb4f076da13493998468c5ea8193c1f1 100644 --- a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png +++ b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64cd91f5beedb0d8ac992cc6c75c6827825f2a137a066f6963e2af56e1b5efc0 -size 845882 +oid sha256:198c8d05aee631a28e8d8484b1a805f5b72c9c38a8e410fc5f74fea967a7fea9 +size 1016824 diff --git a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png index ea8218777489a4ae131711cc1355d3e91fee60c6..5f9677b5ad3824820c32c946af93a7ed61a5da7f 100644 --- a/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png +++ b/images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0c67ee44b2b7f86a0974fc6a75c54116ea1090e04a9d5b71273f7828ad8546e -size 444656 +oid sha256:bbc9af73a787cf6c555df54f52660abe25d353ef6e585ebd5a3e043d026a25ef +size 361461 diff --git a/images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png b/images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png index 153ddd96dcb818f80702d3f1ff036d717391a28a..52a4603780fe9e3396deb33c4e67d35a7768b783 100644 --- a/images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png +++ b/images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:721cffd1bfaeb450adf94958f96692f06ae717952e6649985c9b5003e9cde620 -size 977082 +oid sha256:4ffb794936837d7fb6c1dc6c8b5ca5d3083135ca6a746ead0cb4cc94120471ea +size 981269 diff --git a/images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png b/images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png index e8664f209226b1872dade51b647bb3792ca39d55..b206822453ac30252a1443948a0102b8f728a2fe 100644 --- a/images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png +++ b/images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87d818f10a09fdbb7164395194b770c3541f04603e5a3a8d3d45f1502a3532e3 -size 1000653 +oid sha256:413eda541b2fd33fb71797a4e1bf3a1a0db4ac7c641542430b621a276993a64b +size 1028558 diff --git a/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png b/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png index 88f7bea891cf78a3a98250e275ea1988409c6d5a..4eae0bed8a180ce6fc65b7065a9d737b3b53592c 100644 --- a/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png +++ b/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edbe57aa85f4ff5d51944524260725ce8560c5808a3898ac84a7df830fd509b2 -size 557795 +oid sha256:89f4cef7d56d1b5c9d685b5c5434fe0c4683ace94178743afa6bf00fd9bf9728 +size 408469 diff --git 
a/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png b/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png index 0d33d6d6063449647846dbebcc42f2671134004c..c723efdeb82d1534be8ecdab38b3f6efdc50dac4 100644 --- a/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png +++ b/images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b4be0237eca009da6675e6b9b9d787fae172fa4d995467a8f87988509b43cab -size 324887 +oid sha256:cbf82c6115d99cfe677b9866551224db6998189de813e06235495bbe9cc0d7cf +size 687586 diff --git a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png index 37e7fcb3ccea06f3a96f0ae67b098d903544c93c..dba99fea2027a6600532ec80086b019609e2ce48 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2473c43cfee0844e0415c67765bd160b26c6a2617ee64d0f2dc1c4ae5c8f2337 -size 167166 +oid sha256:7e0dde45e71a076c8adc7609da40842e3e74ea6dca9281df925a9804900a21c9 +size 177210 diff --git a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png index 99e85787f628f53ddbfbfde2095e77d296045c6d..63b63f8076b5e8e492a47390e492b2a4d2e7b9bc 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a5d49d925804293b0af6b0611d15dd64b00f90df79b496336f7df55e2f45b9d -size 674253 +oid sha256:e5e64ad1f88f30714e823a45c108f294a96befbbb582b0f64308dcd8f24333cb +size 462994 diff --git a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png index f9247c0899bc8040c6bd9a2285273965266ebcea..95a462ca56226cae26b8e39d28bd96561c8ced9c 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4f20eb5defe706b972ea13f9dbac92f5f25079722bf23cb739f80fe1635540a -size 675974 +oid sha256:9752d2576e23a18a431ccdfa58b38982d3feb93cb37cbf09e9d6afda80430318 +size 489200 diff --git a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png index d096643ffffdbcb62c026a42759cd7f781cfc344..d0c19a8f8517c2044da9d7aa55e2cd7abf366347 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f374e6702e7bec513242dc490b65a95486a80d532d3fb92b4edc52831178d5f5 -size 635549 +oid sha256:a4ae9c13acbc2678b31481cba159ca8d905d806733a75531def4b1eea7aab8a3 +size 478442 diff --git 
a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png index 99a2038cc08779260497fb3776928ba905b8d4f6..b73af1adb59ed7d5818a7ec11bee0f6491597871 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b781dae481b504a1c1d633b78fa8d8b608da12394988af200b6b0f96cf4d512 -size 377795 +oid sha256:4b342bf65c3facb4f13f3440ce23c54fa8e4becbd5adb8a7fd9d6cc034dcff6d +size 327529 diff --git a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png index d9a362f66075778bb0678578e2554e42b03df3a7..c4a7ff003aff588d0776e391401c7b5a06cdf498 100644 --- a/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png +++ b/images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60267a950d73ad900df18ce4e5d4afb549f4f884e2aa2c0b9d85b056ab407888 -size 621009 +oid sha256:94a14965ba739edd795e25d2954d624a6d52a45d4dce9e2f755f09c44a527f32 +size 536453 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png index ff32d4e32b27ce3d473eaf6cdf822c41dc44190c..3f8f5c386fd6f17ba65ded8f50c1352ff1bc61ca 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02e30470c5ad3b0487eb39b3d5dd978ba9da9fa0fa99578e28680906eced6c2b -size 365179 +oid sha256:6624bbaca063e6b8738128f58e282d6d54b4e9b1b44f5716b784109cbf753d9a +size 380658 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png index 558113c45f2e910ef87c24814d844b3b7846fed2..4345d65217ace3299037a0a8711233b0df8ecc61 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97465d4e27234cb5a4e181cc4eb5808489e90d53030f1b2e22d24a0b60aa6765 -size 362510 +oid sha256:851376e63184d5b605201d17a78c01e674c14028b7f5134b9dfa983af146d8bf +size 532874 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png index 3f73e8c5d6168928e33339cfd218d2e6443d2e40..9bb9e4a64c39ad27cf7c77e74ce17760c5042f37 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:353651f4c2ad3dcd0e9e438bbd0890a21eed1611366a4a44c46cf1ef40b9c84a -size 1568887 +oid sha256:299451cf8f67fca5653e20bc131de5d75268bc0d2ada1c23100a31256a490025 +size 1893236 diff --git 
a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png index aad21a06578a65ac11ef325de5da2c942f1a5b04..bbd70d051996a017b2b12696939e2fff1c2dbff3 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbfa44fe3408a3fb49bbfdb7fa94f71e11c6993e9f8804ad0db23c4f4a13e876 -size 388293 +oid sha256:6cacd06c71eb04f72456301c80e3480eb242bf8a21ac5a84229f46b6be96bf8a +size 569301 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png index 791cbd8a73b10014f21552fb30ae0f5e216c8b13..5e4220ab749d3db0c353a46e6d4c16af6286a886 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:646728c9e7d81a8285f20288901c1d81ccf448c6b3a8be3feabe88e1eb50e813 -size 386157 +oid sha256:e6c423c9801db0fd5c6e3b02d5ce608acb7eea2fb9139a55d8bd00b0fdd543a7 +size 391948 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png index f028fddea18d692ab03cc4b665cdfb6cca062a09..fb9e569e4f0f0ce629cc09426a8eaeceb4d22e28 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20fe1e26085c561c9f493e0ec125b56cd7007632ed8669b73714920cf5eafba7 -size 365585 +oid sha256:c08df091be6d229306778dad8a05a0fd529f3494d451cba84158ccbe1e699dc6 +size 535969 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png index bc31ae68f249d0973f04d22a3ec90607deedc6b2..19894494a57e7db67acfbb8c3424884ba980b36e 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b85de46453f863a0369a7b46dbda3ade4c193e351ea021861edc5ba4783a815f -size 363536 +oid sha256:d56758928875f1d852380c986c2b0b8a8b901456ac2973cf0d044033250963c1 +size 426560 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png index a10fd6991d6a4b8bb6efb6dd009bc493a8466860..9ca53c83720455515860974fb23f4026be79b250 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd0c5fc4da3aa99f20780236df4b715a1c22c62009fe92fa2b5092befb608b50 -size 365421 +oid sha256:ba65061ffea0fd358ed6b946d9e908240916077a9eb1efeaf8735e952cdb9a50 +size 547945 diff --git 
a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png index c10e2329977cd4d78ea2fb8e899114f8ce4501dc..0a0047336293c970474f9799a59b75598d13e11f 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61b6c13f254e27bd63c860cf448781b746d1b8b84a1e1a682404443ce6bd9734 -size 364127 +oid sha256:a2e28001799f8f43d457cd3ef16519509f5918e607a075c6dbf972cc70051a95 +size 423360 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png index 07183e6f131020afea8ffa87331f8e6f5392860d..fef1b99d22e8cf7f64a93d6ecfa9b0760e2788d3 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f46a7a7058400295aca25b623fe52c12e0da54e6f32a64cbdef27e096a4ad640 -size 388614 +oid sha256:faa18f59121aea672fd2d57547358a3d82be58279cb3040c3a4ebadc3e675ee9 +size 376814 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png index 69850b8ee801059683915e500a78fb676cb4dc5f..2ef147363c7b4c664d69e1b35873b0dbdd26b802 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af5f610d11c2b29f80c9cb599e317ab1489604f339a2a25532379031a83ab11b -size 391553 +oid sha256:2391a5dbb5cdb8c1c3ee34b3700345bd20a5cda2516f56c365dcf5fad9c49382 +size 422883 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png index 3684f7330487c50eea70b429eb4add4a85631cfd..34126429961341d69f2978f29dc421f9f0368862 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b44fcf0acc217601a2dc344d22603d604f260daa5cb808c170994f53a33ddac -size 809866 +oid sha256:705ef66900d9d007d6a5bf10eb3010ad2d1d30b4d20784bac0d1d12d040e5fbb +size 629127 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png index ff32d4e32b27ce3d473eaf6cdf822c41dc44190c..57d9299819e5f830ab6597f5c6bc2824d4fbec2b 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02e30470c5ad3b0487eb39b3d5dd978ba9da9fa0fa99578e28680906eced6c2b -size 365179 +oid sha256:0fccb36f2d366a60714b567eb01db6c8d1fb90f731b58fe1729d08a38821c95f +size 535539 diff --git 
a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png index bc31ae68f249d0973f04d22a3ec90607deedc6b2..7afe7b9e848a2d7da3ce1fe8b48dde5b378ef4b1 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b85de46453f863a0369a7b46dbda3ade4c193e351ea021861edc5ba4783a815f -size 363536 +oid sha256:8bff2b3023e83ee9bae3748b726b158b4cf3ebbf2e55c30305798b1d132d508b +size 324054 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png index bfa208a6353061e7cb9c9755139e04313a8821cc..fdf2ab872f103e408e8453cc54ba3ae39ee64e74 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68d950251bf19f19637fe142635a978ef03decc3becb3ff2cb045dd3f44fac99 -size 369020 +oid sha256:b443fa89016d1b647d2a2523932c1a44c7b4e7846d480db44d5d96c7b8f6bcab +size 317720 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png index 2d5ffb00fb39c017d1f7849984607e7e9e7a7fc2..d90f371f7b852c605a8fc50152541c40964dc67c 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a61f24a9625844bd39dc02afa71f446ee8651d4ec3ca0cd6891fb35c90c15c1 -size 437658 +oid sha256:dfc4eb9093de594d91c9e971187bf6d0129ff88f30d7fdaab095927e0027baa7 +size 479198 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png index a1ef03461c73a719150b2f3f0c92e11017b75d09..4308d97a37fcc60991fe0449c1c2b559dd4f9e54 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:563f96e4a0c53ca4bf059ba2395651b73871cb19d2673df1d7b65fecd63bdc1e -size 369758 +oid sha256:4f4d8138a3c04dfd72dd5cac6b6d63ef70e8f42e36064567d1549cc93e96eb78 +size 261243 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png index 7a7163851c0b29a7f670696f523b653adad21807..55cdf9e25759ade9f1b2eb74b494a77bdfdc704d 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1b11bc15a7fc156a5e0c4a9181cbb253d1619d65c0f70e438b9acbac1221097 -size 384827 +oid sha256:4afc9640cc31742239fb8e3f87980f8fdd361715484b66df55f92db63d38310d +size 323278 diff --git 
a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png index 34bb82656fcca5bdc9316928ee2fc4087dd02cf5..2111141054db60592c80c1abe046d74287abfee6 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3503275be9f0574cdc19d9a29dc4859b2ae83b6a7172812330339da11c07595 -size 372133 +oid sha256:9176318d1629762b75d7986a983ea59673f15e6526b32e61d5b288a4d2265d97 +size 353416 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png index 7eb434f3e534e5edd58b58908af750b58c1ec6a5..5070ffb346aa1578162d0b3d1cb737601d5c25ef 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55d60569d51143781c680f2f6db32c136793e975b9eec121820cb15d711e8e65 -size 384112 +oid sha256:83e58e1a7b31b575ae338c12845db89af9caffec37c1fe2469c771a0971e50d5 +size 295385 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png index 19352e4e2b5b23bb340f240a0242151d3a443584..7edfb78a79a2c3d3eb51ca9b67ea8890b2a645f9 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57d4d2b69375e9ee254d6c4e2f88ba7b9aa99028077c2174f122129343538ba0 -size 362217 +oid sha256:6db21b71207265b1a2e8fe1fe9dba573bf475a78e1fb90596263e1e3186eebee +size 445790 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png index 067ef3ed5e6b152570a12429b1be416b6a8b367e..c8ecb48d41270468d0605579410fe025331c9964 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f70e64267d73d962649894b1f06f49555270712caf0520bcf731f07b359d9bb -size 383407 +oid sha256:b3b14667909afc0aa59fe3fd36c78f20f2e20fc894af8d78c5a804dc95a6cfce +size 422498 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png index 9545619c0b8be8f709bc2b1ab7d2dad5bf4ee7ad..17cfefdb82fdcf30c339f83541307c686764c3d8 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b27d63da0f13851b4217f885bc9c372c280af5f296b33fcbb4fa1b16feb10e77 -size 365571 +oid sha256:53f517a0b79d86b18ceb8e74a89879ff80fa3181f7735773379e46b490e2198b +size 346710 diff --git 
a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png index 94b0babdd97d2596aa90e2dfc10320ecfe6c397c..2cf32d6a4839cd6fac931956d1201f4be73fdc98 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4b9b3e907c887c36895dc318f2d6d7a71ae6e9fe637eaf03318f2f89311b7a3 -size 369178 +oid sha256:f2923e55c131cedea0f0c5beabe5c4722a00b46eab6654f0a0abcb0d8733c707 +size 501180 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png index 7a7163851c0b29a7f670696f523b653adad21807..8363e48af1ff38f73b124407655f19eef90aba74 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1b11bc15a7fc156a5e0c4a9181cbb253d1619d65c0f70e438b9acbac1221097 -size 384827 +oid sha256:c71991d3e021b4b34af90b890ed7b2bf1ac569554bb53bbf1accd103b9d7a800 +size 545141 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png index a10fd6991d6a4b8bb6efb6dd009bc493a8466860..25ca75a114dc1eaea7350f1823b4d0920b68aff2 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd0c5fc4da3aa99f20780236df4b715a1c22c62009fe92fa2b5092befb608b50 -size 365421 +oid sha256:0e326fcbb9ec39693334506077def34261dbbdbd354dfbc4ae35cf702e135e71 +size 288237 diff --git a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png index 3f31e316bc4c54b436eec3a0d04fa59ffa4d9b44..85901fb535ce11eec8e188f44ec092838956f9c2 100644 --- a/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png +++ b/images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:161c1da5880fc471577a9d501a289f965f0e235b651e37214fe51f03b611deea -size 1504466 +oid sha256:4c074430418cfeee1535be5287a42ec01ee687dfc7b2d9586237a56dd6132ae3 +size 1216534 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png index 68e09b245e02e593039eeaeefe58655d716f02d1..d07b43750a6e451b165b7a3b670e340c73fd0141 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bab755335b439f867d931ebf21a1179ee8fc9f1efe8b4ba51aac031c0536a49 -size 451126 +oid sha256:2bfd1091f8d7c0b2f13e7f8bc08b04b1bf9fa33c090d1bd72bf7ea291da50e1c +size 481688 diff --git 
a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png index 2f6b093f2d248e65d80d4390ca3dad1699d89dab..ce5d7480f982223519c5095a601f5b4986144a28 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b08a8e29b437cc5271c52f5cdabb4cb683b605c2b073883d2330bcd791ee3eeb -size 639599 +oid sha256:82572f567900aac2a2dbcd95f9dca804c0c3ad455769ff489316ab6f68e53a95 +size 642193 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png index 99eebda36658a815c8ee47221f2a04f0246cb1d2..769ff16191c10b4a09fa7b8162ab53f85dda655b 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bb8827f1d9a16603f5de1595b601a4fdc74a6f1f025cc1cb990ff9209100b99 -size 307192 +oid sha256:14d39152fabf5ae0decb618c9941062aa2b67c08e1c9a9df12b9eab6368f3375 +size 731593 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png index 7821f654cfad8db62a41fe2bf204f20bd9bf3da8..35db2433d5b412de995d3ad492b4f628525c613d 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f31663f55812590a79e157dddf19da1f926271e7328c04f6162425aa8801317e -size 329704 +oid sha256:f31c422513396f72ebaa63e7e56694bade2b90d03ac7ca52bbcca0bdbfffd9ab +size 674676 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png index c71d965ec40727849a9f7f7c12502b9d91bcb2e1..2f733cdef5a64c05d094283a44888e3acf1689f1 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6560493f8466af0f5d28a90bd961573399fa5ce538a4363e9dbf823b88c82d0b -size 540786 +oid sha256:3f4521dc1e439ce80a46c00891464052356ffb4f5ea8b92d618f405bef86cf62 +size 664197 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png index 8c766f7c55b532a5a53cb3cf65fb7a15383ea01f..3cec466133350517d12f5ea217c060bfd0fec452 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e16e37ec9a49dea45e5fc87edbfbfd15637e0eaededc9efc8ad7201d22209901 -size 336238 +oid sha256:39670cfd38262a290dc960c545d9f1081b099cceeaad02f29d9bee9c241bb66b +size 275388 diff --git 
a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png index fa5bc575db89b31858c6d3233cfa575963ae8ae2..d0fb513de5222eab88e2fabe8072f6b5fd87681e 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eee6abd7062ceee65d90106cd7ebc56dfd6b850e257be1b2737512cb4d7c71a2 -size 264180 +oid sha256:dd13eca99d6aa2f6e47bc65ea59d587e0337a3816492b0fe9e114960993680a8 +size 209754 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png index 32a1407b2deead17ece193574f9a758c28a2b030..0cb2d899b0898e15003ebfdbe1c051bcaab6630c 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39c0cfff4b86c4f2e48c782fab43932be2e8304852e684a260f0a7891542ee59 -size 484958 +oid sha256:b670382b930aca111d2bc3437f9b8bfda2f32f22346eb44fbd71ee732fda67c5 +size 618506 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png index 7a621ee1479225f59b7c9b92deb1c34a9340ed1a..5d0e746216badd07a2938ed1dc0c9e9c7511c4d8 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66d4ab7ef6798d7bb7f2ffc005105fdd58dd7150deb053fb5ba4031dc90d0b20 -size 259374 +oid sha256:935f4c279dba73fca512ac556776985a620e5dc3f8b3853b9bdef1445ce366d1 +size 610331 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png index e22285a04c0cd320b8719d674fdc088d41663b28..1d790eeeae255a316a42935d34125b0a1024b499 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09926361201a24905501d78e0bb22e1f7dd5c84043d5d2009d4a5edb99cdb252 -size 351072 +oid sha256:95e406697251c4daf1c278b6d1037500717fb19c32963720455f004843eda202 +size 446029 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png index 659c310da00ccb610b643487f522da7a999e0b9a..2d8066a04e560210455aa27dea1a6b3bdbe95594 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb05f33d534d4eace17bf08091b83362d2d7762560793e8c52131ec24aadd383 -size 1888906 +oid sha256:58ed3d80769bdbd8964d9961ce2dac55919f635b730810aca243e4fefb71aad7 +size 1685319 diff --git 
a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png index c98a328a1b77360554e46f4df0f9471e0d79df13..d68b5d42274637032c6186b714f27356461e82ca 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98199d27c103ca8fecacc031a1a2f202eed94b9339c1c44201b41c12dda0d864 -size 285314 +oid sha256:f7d2a48d06c2a0a673112d36a2e67b5a39b65321a9cbe6c94c7f2529e820589d +size 483172 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png index 9244c3ca309b481f7462d7df5bb1796324c6e4d5..97ef5d6c77c9460fdb657c387635b36e1eda5dae 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9978feb073f8781b834249c2c7956e6190b500d81c2bab77842738ab61b64c1e -size 316513 +oid sha256:70e694c99a1d3f1ecd990040fe9e98bec646e4188778dccf23a427106a464dd2 +size 232954 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png index adb82262ca8ed61c37d081c45e5298d629e61414..bfc84e110c17e0823c7e0330292b6fdca64877d8 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bb9051981fa1dde02c4a19aa74d44e1f2c238dfd0c01845b4e56425063aec41 -size 337882 +oid sha256:143e72884fe6f2608d89a3e89bc88ef6c19a4c303859e3a86e6b0fac470bfe92 +size 631362 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png index 00e5e742966e7d865c27013e3d0b698b92294b1e..fbf68d15791aa6b1b3382f5d360460ac56b55dca 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec3bb4922b500aee05d32f457e1e49fa5b92ab9043145200fde0e3e1fc93f18e -size 287325 +oid sha256:ae6c90ecb6ef8444aa6605c7f5b3e8380eef79547570ec7a79dfe61a116fc483 +size 215475 diff --git a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png index 564449facb030946f52b0a02b3650e666f1787d2..56a4c3065cff12cb5eed409207bd7b20d72f9cc8 100644 --- a/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png +++ b/images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c943bf6b4e5feba1f5fd13e5f3db1b8e8252f3b94fb14912984eadfc4c555a3b -size 312538 +oid sha256:da3c7e16183d62baa4bd90dfef4267b2332987146ffc9f8c88d90bb78b4e25fe +size 404196 diff --git 
a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png index 1a5d7d82099193da4dbb1afed2fcf4990866ad4f..6d019ae078fea1d604e68bcaf7aad011b6a0d558 100644 --- a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png +++ b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8aded879503d8960c5280114572b48e71392e7ce0306046a6f21027b12f0d66 -size 3811488 +oid sha256:5b71abe2d3b40119d6fd016622d88f6c2a1e0525024ffe2fcd6e73eb44416c67 +size 1299080 diff --git a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png index 8ba68f4bc094be58dc2b367a2c8273dc2051853c..4e1a2831bdd845bc5870b197bf1142686ea658ed 100644 --- a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png +++ b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f314b5e9ca0853f1d43c243a9533c79befbe750330a90cc38ba3e5ec6c693c2 -size 2945997 +oid sha256:efcdd22771c40e6b30f45c9bcca989da815ce859c46dc952143e5abd531fe9c8 +size 2297461 diff --git a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png index c1f39ecee98839c572530605913a943413391ae9..367db61095d28cbd20fdea0ef7dcf705d3de6bee 100644 --- a/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png +++ b/images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e413f094c9e858e84ee334f85663206bd79539c091a148e9c7d4404dc2930e7 -size 2837984 +oid sha256:843d81415113b16e077ef6786c88c61ef842e8005e36bbb23994b5ec0b5d0d8e +size 2761652 diff --git a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png index 5cdff1f81961e547227e8a22ac4ce6021bad420f..bce4b8563b473e3567d9a2d0a3c93b19cf4da8f3 100644 --- a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png +++ b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7fcc98de9b50e8c0e4c34ed42af93b73aef29fe283b6a6840fd2b0c34cf7a75 -size 394581 +oid sha256:090b459313e2425ed9787179cd581cab6669d8c2012d8d31c2ca3a9263473746 +size 463353 diff --git a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png index c350e5550c1410209f6c5706867b1cfbd2b851f7..c69ef7bdd26722abb94f525e5d65e70bb0f29ed2 100644 --- a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png +++ b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c99ccf28bfb999a4d9191722c4d9e49f90d152264bb81e84a34e3fd3360b1d79 -size 1047270 +oid sha256:1970c976c1213b46cb6da3a43b4641abc4459b59b2e7a8d69ebceb3547182e04 +size 1373422 diff --git 
a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png index d79c8fedf3f87aa044dc5a88c27581fa19f73727..9a7cb7298422c520a7bcdf76f184891b314a56af 100644 --- a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png +++ b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f7cf1e4998958d61d75a4dacb100471d2768dad7c444d6ed140acd22272c093 -size 264865 +oid sha256:b2e3c6d03e7358b4ded7ab8f2dad403a9b43a5960572ff5d9f704c695e7f2cfe +size 1383853 diff --git a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png index f9c6c7d1277f704ad62e9edfeb0ddcbbcd91b7a5..c696eee147fe1cfcb2c1fea40aac2c7178934238 100644 --- a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png +++ b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:817c9af4047cf260a5460bc525d435737fe548329ee1b254aa276ce4aceb3160 -size 343262 +oid sha256:d35df34e36624d85725a2ef2de41d9e29214f36bca90cd41d3ac245b60c3302d +size 1389376 diff --git a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png index 18b36184857de457a2d82ee14ef4cdd83eaf365a..ecd1c6385c5dadbec10b29f86b3f3b70c47e303b 100644 --- a/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png +++ b/images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cc0798e2dc6b28d22f16386695bb357e642509d110a09788579bf0010ad8f16 -size 741727 +oid sha256:28e759f77eb8b2255f403403e0280f1bf0adf4c4282bf2fec20d8423c42f9af9 +size 1420892 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png index f6fd8d6da3b36cda33ab741e5baf3dcb7abe03bd..97c09b2cb4d49be3bd45e1be13d098d925fde68a 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32587f3424bcb043f7dcc190bc0da82824129d1a88604ae0c3ffaef9c8ac12a2 -size 1216753 +oid sha256:52a7b682fe3121abf65fe898740cbdf8115372156d67ab858173d86edeceb93a +size 1296591 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png index 01d7e68d627de4e469a963f5cadf3863b9a958cf..22ccd4b5e34d158364d20605782ee1fd37a84556 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5861c80fe1558ab6fcfbce1dd09c09d2e33e9a4c2c1b7101634be19564d7b62a -size 1189853 +oid sha256:8a2e2fc8a32ee60f4e370a1e4cb8ef1f3fff910da96b7e42c8c61ae9c0de711b +size 1535525 diff --git 
a/images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png index 4620384d53de6d95d163bd6dc6a4c4fee10bd177..e1e125cd29ed3fdfa3a191346928ee858202f62f 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c59e74e6026a7ed3df0be315538d024f1618130c8aeea91eb4a82dcf129b7dae -size 1137395 +oid sha256:07d0139cc4da9a0e6f2941fc54ec3d9a3ada0102e4f218d4c2b14e32789f7c8a +size 1026779 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png index c1c93a2aa3799ff79978a2c2e053bf6b95b30b86..fae0c964ae71039ed3652682e8157d40c9871f02 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5f1367b3e082bf3419e6f9f18cadab8ecf70cd9b4566976f0ab240569327482 -size 1375517 +oid sha256:ac49b2b0d863c0240beca60c1f66398c63b26f99774fcb88a7f085b7fac71f56 +size 1358878 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png index 7e05a3ef5b5afcf44cbc013ba371f550c9e31c74..5154301d53db2e842e652b6769c8539aa82414b4 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd46eec4013e8f9f500a2b47d4f7d061b972255a7e7a5b7075d51ca8720ad26d -size 1186841 +oid sha256:f3f1bceb904c880e8e95ebf8e685ac97c48af21f0b98052f635556a7aafd984d +size 1581504 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png index 269cc8ba54fdc8cd0092627c9f2fab70215fa104..7b5ae5498c3ee0c60a1c96e0e58b1c3324d7458b 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97a630f71d9bad9be3d8beb69c616ad8550874b0a41e9323ab617f955f8107b8 -size 352361 +oid sha256:31250c7fb17cda68c9f224d84dcf2f26ab380b9b0e4bf26336dab98ba061f41d +size 267005 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png index 8810d63fa14fe2542bccbea08db3bbd7013aaf69..d0bcca0d5e124b31e1d71d530d2442df7cb09750 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94a81678ac6a1aba0d3e27ea1c45978feef6ea0dc28448c96c50c12f279ee8ea -size 1367930 +oid sha256:207f0ead9ba7e2cbbffd0695caac0fca4ec61b3e25b3e21fe0d885bd76f66d16 +size 1563574 diff --git 
a/images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png index 6d30960c98704d680c50911393e134e753c8c892..b9d42577c0f91dd7d6f5817e9aacf90eac82f09e 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebf57c4793508da050687ca7da899db8cd95a5c94b489384a724d6d8ac6edf20 -size 1390730 +oid sha256:f4b49a8ba47f0710a930a8b59781075834eda9c2fb35399286190997af21098f +size 1519417 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png index 98ef161495463a0f72efea47834b67ee7885c1fa..301be1314c2b71ce399966fad6833cb8edbd7ea8 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3805c51a3856eb1dd29ac89bf3338fd2a2b6f9a87be4afce1d6587228348ab0a -size 1247519 +oid sha256:2b2576ea9f68df9930317fefb420edf5ed6fd5d172af089cc89c03c1f424747b +size 937803 diff --git a/images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png b/images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png index de02664e6a06c59a104340969b92dbdad3c29074..fa92a90046b1fa35a7f9f9d34b87a50faf2df752 100644 --- a/images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png +++ b/images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:536638c6819b7a6c39860b1122fba179b00c7c5e42637c6d568fe8dc1419e60b -size 1130338 +oid sha256:b28bebbd814c86f49eacea36e30edbd7118070fa843d1f83f3667ad72b233a6a +size 961960 diff --git a/images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png b/images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png index 822a20517d67e3c4305aa4fc258e1f8ca992dad5..e65d23dd0b2833c16a2322aad739c1ba914a2387 100644 --- a/images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png +++ b/images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2ebbab926ab01010b0c8700c85e17cfcb49ce4e37ae68f95440fba83910db8c -size 1290109 +oid sha256:564453a06ae4f8f736ee9d972d874c048f076a76d8a237297ae8d437a6fa3d6f +size 1149694 diff --git a/images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png b/images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png index 822a20517d67e3c4305aa4fc258e1f8ca992dad5..336f0a5d30bc7bf007be045a712ca0a3d165dae4 100644 --- a/images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png +++ b/images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2ebbab926ab01010b0c8700c85e17cfcb49ce4e37ae68f95440fba83910db8c -size 1290109 +oid sha256:46837afcf88663c570770d878d29d82146b2532c5189679e36373234d2e57345 +size 1486209 diff --git 
a/images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png b/images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png index c3327f7d30091aa5aafa73e0771592716193ee51..0e445b1593ac676fd47ecc4e792166bf24e40909 100644 --- a/images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png +++ b/images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36c1fe843b3723f5743e1b28ce73d5f032011263b62925c8ff181883783d5337 -size 1326138 +oid sha256:437d128bc5e838ca6f45b76b2da69e6e98993073e2098f96268f8a2dc4f249a7 +size 2065733 diff --git a/images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png b/images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png index ad9922233352c54910c038e25453c00808ee9a78..4727d23f72e1f34fd255e293eb8b10d4dca5637e 100644 --- a/images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png +++ b/images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08404976e07876517fd14c0196c06157aca315b9c645bc99118b81bc0812725f -size 785603 +oid sha256:f184f72b4d3cb90f617210d3aba99a8eeae4ece41ca2b9600f012f7c79a14ea4 +size 1529566 diff --git a/images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png b/images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png index 050cc9e9d426c1ec9ecc78e5c8a418d00fadc23f..c044722f86fe1951a8da0540f718c52ea75cd072 100644 --- a/images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png +++ b/images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e6f8673edb0ab1ccb8fbd0df4a4016b2f640561e03fb1adf3c71486f33e9a9a -size 641242 +oid sha256:e4e633cd9df909908046be207408cc00306e9f17c7d5c0c8c2c8deeafb879605 +size 974595 diff --git a/images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png b/images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png index c6807ee017b3cd91eff566a690cb400bc1337eab..dfa2480bf0c5600930ae09a5eb92657539f5189f 100644 --- a/images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png +++ b/images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4badaf825ad396aedddaab271443697e3e55b72714273c3e79271c84f4172627 -size 773984 +oid sha256:4bac7c3a1c585822881475617201998b8f2b9530da74ec0e1fdb5a4e7b0206fc +size 1148663 diff --git a/images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png b/images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png index f3645cb761d98953454b284d79ac159ad8d70156..6b732eeeeea495cc9c0c025439ed81fc84e3489f 100644 --- a/images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png +++ b/images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca6cf86158d9cd9c6adc7a4e2d1c8d48b6b1844e551c637435c124b931bdade5 -size 2325952 +oid sha256:25646ddb536670e199c6ed0ac19ae97db91da6529fe69a652b37219777661e26 +size 1176474 diff --git 
a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png index b45187ef41019a643811c2149adcccfadf15d386..5f5f3e0dd48c09c64c8965577919ee18d3ef76a7 100644 --- a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png +++ b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba1d977a7b94d8d653c806ed22807a0c7c9dac724d9d639031e584d192ec390f -size 1793305 +oid sha256:5c3b814fb5c50ea36cd669aa28ebdb1c8e0a0571d15ddfce0c52646d8cd97d11 +size 1156107 diff --git a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png index 0f5a1b62fbec20306b2e3561f6ee270d8e72fbfd..313db7244dd241e77d7980b1775a14052bbd716f 100644 --- a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png +++ b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15d8966e18e1d1a887d6221ab8f5a7de2137f72ddf61f9b34f9074d89f323925 -size 1512320 +oid sha256:97422c41bd4ed1902341a907bb58dacefa42216be9320e8adffb0adea5156895 +size 1517201 diff --git a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png index 408c44693aeafbb9ad67c172b64a87878598658e..478b2d8191a1d53ec27f7fb9431f29d15b9bd337 100644 --- a/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png +++ b/images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9826889bece84e34696adb60579f184e89d0a041c5eda5f1ed9ec9dfdf4aa8e8 -size 2529288 +oid sha256:71ab42750d9b1b2a5c8a2fc63af78f3be7a167ba145e63612c366619b78785fa +size 1070821 diff --git a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png index 5fe73f1cb08ef9f419122d674223fff21fe08bcd..7d7fc316d1fbc18ff4020fe8003c8ea7689bee22 100644 --- a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png +++ b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cafda8fb2586411d76a87f7c06ada4a7f4f155b3144a5e97805f3580695f394e -size 280086 +oid sha256:dc99f53e26642da8eb1d8d26ce51c6ed0ed7f3ee75aa1dcbd0c8ee9986645460 +size 132975 diff --git a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png index b1a2f5a9e3d103464f0ded1bb74972549df20f7d..306d601611a86c2b92e2fb3f7a08ca4b16b71fda 100644 --- a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png +++ b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5dabae91e90be7a213714eb9217d0d52982fe4f4176a1eea0089a2719d37ae9 -size 962102 +oid sha256:06e272fbbe5a9e55be50475ccec5491e9a92c1da461aa6bb3d4e0d65ac5b55ac +size 754983 diff --git 
a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png index f727ab8a0330c38502ff6f8ebec2a09e2c2fe884..fe37f3a533d4767e66362bdb99b287192dea58b6 100644 --- a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png +++ b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb8f1f009808ce2251f79c6686d23cacee5b3264d53fb7ac3f657547519463d8 -size 962180 +oid sha256:b550c3d185cf037032fa9345ea90b7c8d18a7aa543069c44e81b3c2849d2fbe1 +size 1017878 diff --git a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png index e9b88003a2afcd8deeab9db264c61a69187bbce1..ee85fd0699d26e27a9e19007ab793da27f0476dd 100644 --- a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png +++ b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:801abae08a9e6ddaa21494963deaf43f19b495abed319ff4224b9e6580f17169 -size 775415 +oid sha256:0c30d7fc0e42c55ec2af3ccf12697c2cab6c154b3b766e6f30876329e6885474 +size 725853 diff --git a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png index bc96b3c4c21b2769826ab12357da31a0465ca658..5aba65e6624d773e1ed141c5a4c0e77eb636fe75 100644 --- a/images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png +++ b/images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c262600588ffd14a031afa8ebc0e79cb972b927a81988fee4aa3483a4a86026 -size 796759 +oid sha256:d28da87afbf38de831dcc76fac7bb5cbf914e1c3399424a68d2d044428b049a2 +size 677916 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png index 8a46e77211a1370e07064aa1b89cef3dee01981e..bf06c21c74dc758b390e642d7496ac1cdd1aadf2 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ea5b785e2623f9c482e023b5f94bdd708f909caf61b56a11d4c1b4d55c867d1 -size 1975410 +oid sha256:798ec3095dfdc89f520fca801091912425c5a234bc9746f6b6d49b14e038d3da +size 1467252 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png index de023e50f682203c55335ed720a23dd2cffdaed2..19758d55cdd9f826188ffce9e6dcb551bc0cacbf 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67b4712ced347323831864e3ae978445f22c57aac879c8ace855a901467dac93 -size 669118 +oid sha256:6ce59f454344415f41a42cecdb937fd807ca44bb6b7d03f78becd48eaa92d63e +size 772047 diff --git 
a/images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png index 75e9b362e5fdd0f8de1b2a316f968454ec5d03cc..0649073159618a3ca263cede3c3fbfe9808ac6f3 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f402b3d3ab2c3cb0a0b41ae8f7af8dd807a24d9b1889e4fb14e0dd6049ff29b3 -size 688253 +oid sha256:ddc6d800dffc009604688a389ff0c32bfaaadcf4d6bff375e9fc44c059c90f89 +size 558700 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png index 01f30ffc4deb13608f30ccf5c9cf82dcc3e258b4..f37fd4d209e9378824354dc69453e71f8760ee56 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2cd478716d59551c687aaeff8a8871b3bb770827d0c2061cab24751e719e125 -size 626222 +oid sha256:ab9b8a559ca5c408b30e29af48a8eb0b2617f2505c75b6054ab697d6017a9078 +size 739439 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png index e021d20b5ac792394475049ca5dd653c3879ab3e..0394d20211011e1127fef724c85a2b9ff41c5265 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62e1cc9d56dc8f3e7b46f6f097cd7af124dd37e0093c0b6b9d848422cad7e864 -size 1163206 +oid sha256:34c87eee0539c56ba5bc9ee8f91158e248056739b3280b4b296652580fa6290a +size 1621075 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png index 6fc7d8704245d6b614cc47a030b6d38a7c9985f7..17a94ebccb638b7ecd26197dca760de0fbd20db1 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6dec39cb66ec64de22f23e6fe5c511b2d5f280283c7b0498085d82757a2e95d -size 788410 +oid sha256:f997e71ca0abd96805f8cda502b5b4275c0da92036b7abfaa0618bf3af4f59c4 +size 1060970 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png index c691d08092a2ec026e31cdca245c53986340fc16..4bf0b7a8eae0a5b9cd78b4b8deb073aa965d5861 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f2ebd6328ccb23ed9cd8f322e49cccf5132505876ba13ac6696d1a9d50df6de -size 648078 +oid sha256:9294ab318d3bc21ecbc153030ce6509ee8de3ec1d9efdbcc262ab6fc6aabe97f +size 704310 diff --git 
a/images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png index d40b886a27b8def5dbc7409038fd663fe2d0ac28..2d7b8546acea8641abf25ecda8d9db1c946b5cda 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1a43d7d64ee4d1cf0eaa146d68a07ca58f2c2cee4f06352af3b61f531b03aa2 -size 793334 +oid sha256:85cf2da0ac0909c2a3b7728dbcefaa9ecb1f87d3bb35b81ae300cd51eca9bfc1 +size 821023 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png index 993131a3197c69136b42590223e56d7781858caa..57994ed203757d40493f1f66285ded4ff9fcdd8f 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7695797b0c89518634e4c2731f30b2e710a433d2878f90da34029fcea7718695 -size 670888 +oid sha256:f4e1ca52c884d39838be85335c945b9ad019c99104f160f9261137e43ad0dbdf +size 569396 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png index dbc9194ea0f82af95b07507aa500c71927912fd8..5db56e60f66422dbaed5d6c582a255555c3f2b7c 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a57c4e9b54902c919ea72e442b98645c63d72e1c100fd4517f0cd10957ab0ccc -size 651469 +oid sha256:bd75193cdd2d4c0f5ce0a526508e4eb96d730298a8fb48104b1dc9218880735e +size 556773 diff --git a/images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png b/images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png index 23260519bce68aea2838c3ced43659109ecb269c..10d9321f31111bdb702ea0df62666b9e769b9d8c 100644 --- a/images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png +++ b/images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ec458355f078837d27689e0abc78f3b87026a714cbb93c0262c5cfad5a57ee2 -size 656258 +oid sha256:5d6cde02d6f9f48c8fe11164a8586ef5be349478b920ab0c74a193599e1b7e0d +size 746686 diff --git a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png index 139278ddf572eb3f34c25b6c001606e0f2ad3f23..510d737e6e70117f944dbb3413ed2953a9b43a83 100644 --- a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png +++ b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cc0b6b81c7c3a0903596fc7a12a1b98e3559c28c0362775611e78487dd839d8 -size 2192451 +oid sha256:1c1aa1188a02ca0f3c6185bc1a5a90e1fd436537ead9f9445f28e59f6c7fb6ac +size 2592278 diff --git 
a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png index 64075c5136ef32c4238d4583f3dcddc73206b993..5d15c2963027bc9e1fe54309d5e60fe6e55553cf 100644 --- a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png +++ b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9a38da3b16b96f2ce104b75ca36c7fdcb3e23f59f6b26fd64456ffd2e170679 -size 2102801 +oid sha256:ec47bd4ae31ec7528acf26dcf4ff258a3ce70475f630af570abbadaf1343bbd9 +size 840933 diff --git a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png index 3794ff69a68712d7beded2b5246ab65c1a9c572d..60f7085d6b920e55043868e112022a1821f54273 100644 --- a/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png +++ b/images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40b0b3f5c27246c0d84ef84473a2c35f07d7c64a7b09441b11d9018ac5b92141 -size 2390065 +oid sha256:6b45a7528ef2907f2566e975c721480438e48032385a061d53cac7989456bbb6 +size 2397188 diff --git a/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png b/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png index af476868fc29a9c65546e8f22b98ebb247646a25..c3752fb653d273a456415a6b08fd092227ac7fab 100644 --- a/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png +++ b/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d137b47bb01361c4bc91b3a10f33688d0cc68efcce0270a28d27ec8926300384 -size 357668 +oid sha256:867dac78a684c3cf271f31eec3b0ad9133a3f37feb68d144dd36a637be83ec5b +size 451795 diff --git a/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png b/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png index 46156a29ef7370cd22bd56ab4b04e440afe8f856..365352c3d851615a7321efa7e9f7042b10d1a017 100644 --- a/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png +++ b/images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e35e480ed9562fd7271f99292b8b08192fd234178280b6dca125a9947ed38916 -size 441556 +oid sha256:621dfe6c597c2d0638130623cc72c5cc94229e86884e4574bebc18b2efdc8d96 +size 506314 diff --git a/images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png b/images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png index dba5c0a582eaa5c163829be1131c8792653a56be..4cf85515b38a9bf2a0401fd0dd61f6c4a2919cd1 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6db7491a23e84261bb4ccb493b7c776adb8af78b3f8a12211b5c72d170dc4df9 -size 1511807 +oid sha256:65b25a8a5a07cfa7113ff62379cae57180a49fb2018d9f8b201bdb201c5f69b4 +size 1424853 diff --git 
a/images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png b/images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png index 28cf00b1931774b26eefb3ae54cb4e1b561d6a77..260484890d13fa233388853ea59c36b162ebc63c 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65b6b29c37b542f335837747600f471ca1eb3244708bfeaf799c856f351817c2 -size 1838835 +oid sha256:10e17069a7caf4a29afbeb7db3877e8203b1e6d4c0eec2e78b2cc6176bc7f724 +size 1752901 diff --git a/images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png b/images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png index 80c65ba6614b84cd8913715c09dd1c50c3621831..7c2fd85aec9bca8f0545ecd830a4b31499929f6d 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8354899d4e95d37e876dbcf31738b3e2678e96516e34c0b00fc658c43f3e84e -size 1196925 +oid sha256:1629a1ca48dad0f95b9d21f760e04c3457c4127e96c3a48b2f28bf1e7ea73a57 +size 1386407 diff --git a/images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png b/images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png index cd8ad32c1501bcf01a3f47ea367f561ad6f46880..9b0b203bbbb7837687fe28e7f4bcd5720b964742 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97c4da46528cdb3ac1b6539e92eee969ea1c23eecfb7d38ce88504420ba729b2 -size 1028904 +oid sha256:466e53b45f2db3021f69c0a6c6ceaabea56ee61c290bfe8dba49374c6d1cb71b +size 982986 diff --git a/images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png b/images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png index 2aa158247ef246b2b83ffda0563852abfa6b912b..f8b36d3dc62dc71fd7913e107d5b536f269f8ccc 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0f834766de181f2b6f0e52e393aae244c59524aeaeeb82d2241bd3225d0ee30 -size 863316 +oid sha256:9694920bc6457609252c3722cfbf7d2a13e8dd07c77b3f60e16c380d639716f1 +size 1526908 diff --git a/images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png b/images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png index 9b18a2415c2bb73b825d2040707280d81c8b2c2b..58afaa74f29f2008b650694e4fc892d250b141a6 100644 --- a/images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png +++ b/images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79a75dd648d07ff76b1ecf32e07bfbee6f2016b3253f557e3c6887893fb571b0 -size 1838144 +oid sha256:200e38dcf9ad0a5aeff8f15add4a12fa571b878b594bc0dbd34b4809c72e4c80 +size 1649198 diff --git 
a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png index 16a64b6bf24711e68e3218873a66ba20199e0631..f2fec633e29b19e878a0750568292af5a3c17600 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3ab4d1125e3b90f6ae8cd56f0a39b32d80d27eeff702edfb01dea378eeb0ef5 -size 2241781 +oid sha256:b8ff24e426dbcdb0434056f0ad421d01dfc04bdd925eaab47d1af57338eb78ef +size 1764820 diff --git a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png index 65c404355a6d97cd4a1e5e69382f3a5df06a3543..6ea6b44ca514603023bdef4541db329a21af7bb8 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c414503533587c496f4aa02c32f5ac751f65f734013d757b0f262a727e20c74 -size 2893038 +oid sha256:32e317de78e1d69e9b13ed0f95bc68220268175d5075fe5fa50f4772fab3e019 +size 1480388 diff --git a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png index 9df27e52506fdc0c790618a07a66381a3375e7fe..576589c40cd7b8a624a4a205411ad5ef764809f0 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43f39b161b880cf9299dc2cfae2be9a5d325875a562f1ae6a4fe353b28b7c934 -size 2246670 +oid sha256:7be6add04948f1ef29358bf8f568b953d16a56daf314bbe15bf63aafafdaa073 +size 1664173 diff --git a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png index 509d93793029a2b8d280d6caf3639bfa15e5ccbc..224c190e18eddef1f01823abd95353a9a888b91a 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5764426c5ded5c9144400616df555e61d3be030bde3c29c511228137763c791e -size 2181246 +oid sha256:e14ab315c332bb684cf3928dd4d8f99b51eddb41d974ccbd56a4274537a380de +size 2045374 diff --git a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png index 77d2fe637dbc87bff84ac3be0acfb2ef995bda79..fa2ffcf612ccafc464694dcd504f7980bac57e7f 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:301c45a5a6416f0fa7f13a0dca216145c5feed958b610cf99310541970ddccab -size 2242099 +oid sha256:bd9a4281558ddc87c57ce0ab9f7b44fa4c460126c3853114d09a0e9cb65de63a +size 1742342 diff --git 
a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png index d311f3cf27d1ea486eed54ae7ad8d95958a21ee6..70404ace7425e60c15d6d7e3dfe01079613bfbfd 100644 --- a/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png +++ b/images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:358785b7d26c7bcf64d18cb84d922f6b9601a2e201aa088ef36cb05f194b5d50 -size 2434486 +oid sha256:7acb99c24d5f70f61206f4bb39fbb22a897821a3b46ab3858a18385a9531d879 +size 1654625 diff --git a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png index 30b120ea8c4126d4204015100eb86c1d155df809..773c81b9ba8e6153c724623e1ccb4c22875c9ee9 100644 --- a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png +++ b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b04b969b730e01b09db51de469c3aeaf7577f0f7f9c82f4e57ff9cbb2e60474 -size 1061645 +oid sha256:139f593ebe13ea4c7663f365bb5471f05a8d192051302ae52abe07e252933f72 +size 872911 diff --git a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png index b2a78638dfdafec8128c4be06d0dc0722dcbb4e4..dca6f8b6a52470bc37d8bb742b118a47bed75f93 100644 --- a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png +++ b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25dc47577f297098904abfddaa8ef84c4ce7c85435c56c3bfd6ce9c942c190ac -size 1893257 +oid sha256:5b8218ff52735324d59a7f641c80cbaca6c5fc2aa6069dcf3ce89290778fb6b7 +size 962800 diff --git a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png index e0ec66963feea3d64a859174cf14ba20534cc132..7844919428a7eb2d130c8e63a9f090e8ef1893fd 100644 --- a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png +++ b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a49ff11fd1a377def206bfa9d5de208ce32199de0270dfd1ddbbdc31b2a1eb3a -size 1327297 +oid sha256:2098e303da40a87d71e1c3cfdeeec59cdfba579e3ce269f7eba6e5946a12b041 +size 1448410 diff --git a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png index 173bee9de6110168deb01da60c76c5d4e4b9c6d9..0b430b8de76106268b5ad3d76fb038168fa2cf28 100644 --- a/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png +++ b/images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e2070fb31f21e11f350b0f0f86b70be49e2d2e5b0a4462206833a803d0ecf36 -size 1184389 +oid sha256:85a886f510b97bedb7fe23a2a5b343d777b0a488d49f9091cbb275d1e4cbf15b +size 1662599 diff --git 
a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png index 5648d58ce21dae593d0883114b7bdd5be019deda..e0a3c6c4350cdd3dc1819b106a05a30825a3950d 100644 --- a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png +++ b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb22d3cc8460f6273c1fa9b62151666f77c319388259530373a4c28039176028 -size 663264 +oid sha256:6e30f588037c242e20fa1854a8aa5f979621a01619b5eb7d4aa0af235ec97574 +size 1236288 diff --git a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png index 0a01875ac22873ddd8aa64902758db696df7ca01..b095ef479c93de21b6af4005b5c4a97cdc07812e 100644 --- a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png +++ b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95b29cfe840a4b5133a882fe29f19581a80cb8b3bda1b8f70c6fdc382cf4e089 -size 772527 +oid sha256:cf3a7ae89ffa23b3c5625ec2a61f16a9737b288978efa6f8ed4164a417bf435e +size 1400771 diff --git a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png index a23edfcf20b05bd84e3737f28ca3c22e32792c60..60b9f582c4156977ba0e939391de5a7a957ff19c 100644 --- a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png +++ b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a21a7172bc67d22b302b6524134651e9d77deb620367b7c907a57a9a7191218e -size 602102 +oid sha256:54b1c244f34b045b6d0c1513c7ac4e67d570e081d58b8402b947be8d448b4cc0 +size 842273 diff --git a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png index 136a3e4224ba5649a1588719dff1732f00afcf73..5c1c6dc89937a8ba2619451e971f835d1c4345a4 100644 --- a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png +++ b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a9b90ff5153d6d31c9255730d8b6357e96000b635c2fef4e8e0f9cb32529e6e -size 805958 +oid sha256:960caa29ccf2334bac6e372a9723272cca6ed1b3879d11931e87b02492c11f0b +size 1509211 diff --git a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png index 609dc306d99346a8fc7a1b809079ee61d0f3f57d..7dabe2644cf060c307e43123b40df9105d14d4a6 100644 --- a/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png +++ b/images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14bdf542cdd9745da4553d8ff09e06f2acdff266e7d75f2ef306295f7cb98c34 -size 1659962 +oid sha256:1ca02928261b7f6c820ee512ba35e341b13614d60edeabba78b52a0582222b4a +size 1328463 diff --git 
a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png index c46d474b2c01daabb3cd4da09f14b4204c025363..d100bed373fafb7fb03f640b9693231c43be5e4f 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2e51c3eaea3a3461acf04dd5e4fe9be26390362006561ffd4184a8e38a1bd07 -size 1604562 +oid sha256:6c80e4fe6761bf7bb50fd6489dff14d0962133ddc4a5fe17d3fb8649a697f834 +size 1495376 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png index 55e6ea8e2f0ac359333cc342026b7b62f507e398..0722fb427f30cd35bc4069b659f3df86ac82a84c 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f28ce96f4812996e09eeae31dc2b39659d804a62a82cdf2c0edd89ee57f41fd8 -size 1123003 +oid sha256:b20a92693cbe431572903fa8d2e9cf32f2359845bab26998f60e684b4eee4c70 +size 1385598 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png index 9508a6d0b1ab620b63e741df894f6dd4fdfabd8f..440a4dcd7d88f7edeb36ffed4e2fc9bc02122715 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36c169c997774a7c9d4710e4a157c9db79c489cae6ffcad84285e6d4c3e60aeb -size 1164523 +oid sha256:20d5e6fd8e35a2e16090e8cf4fb62f8e7a76a9ba05057c03746a4b25295408bf +size 1811361 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png index 73903ed4a76d82b342f234b72e798589fc6b4b84..53bcaf272ff49474c3ecfddbe21942d916600e9c 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2148c4021b049539a8e4a1c32136234522ebd49bc945fcb9a951badf8f30f80c -size 954848 +oid sha256:0173d4d61551753a127587e400d300b9650d86203ad50e7d7173cec4e50d4a97 +size 475449 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png index d2db1bf86b52d2a4831d64b101640643ebfa160e..1d111e509b32a0bdc1c6e859f7ec491c3250f923 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da80f1802813a7f5242002d1e6e2f65b1fa425c7c4cfbda948f0b5ea3c4170a1 -size 1155089 +oid sha256:16a58ce2a13187aff5551a90d525d5348165d8ce7a6914aa810fb249e4b6a1b2 +size 1571655 diff --git 
a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png index 470e46abd82cae3d1dd4b9e6537e44ad58e1de78..5719099a9881351f6fa77593644ae10abdd19180 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fed73ed4c952084a68f6d5c77f88cfe91e9e851acb0e53cc3a91009f92682c42 -size 1309216 +oid sha256:ac9779cb1cfdedea2a878bdd08db17a35d13b3b52f7009d4a527c2e13913b851 +size 1450387 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png index 86757fda5a838d28bf785742457995deccb9ab27..ef7208f82a8a0024d7bcd0412320bba93c9240ac 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba5fd034e238a192e1f9310eb558a518d6782baa04d2bf181b1d9d9391667630 -size 1448365 +oid sha256:3831423b5fea7943ec38f69668c12fb04dfc12f40f77d0af9a991d23d0c18509 +size 1917162 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png index 7d412618c696f96821e4dd0e4a488e5be7768da0..f62860c71a19c87d32c57f52507a700d1ebea4d7 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a3032100ceecd88e558c2cb0c07527aa5e28ce05f6ba75abbaf33891a17aa6d -size 1254823 +oid sha256:be7a2859b6cfdcbdeb509eb6eaae7376a62b71b0f70052e002e5e7d4926db09b +size 1308506 diff --git a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png index 3cc463d37488ae987e3a33838852bfd0d92821de..6df5011b669500bd09e4d90d31f2c6b0b98b693d 100644 --- a/images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png +++ b/images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d913d417339c52087e559534e7a04694fb1a332eb5dfd64ff5bb28399c7b4c14 -size 1140165 +oid sha256:d7563b620eaf9ad80df84b01fc05a9924fa90de6c8bd90811d94e94f771fa25c +size 826016 diff --git a/images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png b/images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png index 4273616b489681e3c82abbee33b1385e07170b33..5e6efda236831284a21d4bba9780cff933bd2227 100644 --- a/images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png +++ b/images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5f4b7479377f7cb41a0a6b94f3b4cdfb47ca70f879ea4ddffc08eb584ccc3c6 -size 324153 +oid sha256:378d345c0f52bb8ec60a877fb4291076b04431b86658fa4df278e00fe8236ce4 +size 357937 diff --git 
a/images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png b/images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png index 860edfc51a6babfafe03ad1ce7f470b60fc78cbd..cfdb6403061387982e63c420e0a3cc22515a8c2e 100644 --- a/images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png +++ b/images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c90430409b7ab3e14be40215311603a9d3832cd663c04b1414537955d2a9117 -size 1293992 +oid sha256:e59272f622f8bfb902ce8baf53530f2c99d9835ddb90c9a88523777658491cf0 +size 1653965 diff --git a/images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png b/images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png index 14074391d5a826615a42fd2cf06af24f2e391770..11749ceb06f22c163f884c84cf8d1a01d33f6b18 100644 --- a/images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png +++ b/images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c12a3c436708a8f93413283fcb1012dd5cc8f98e458231812b371dea30c0acd -size 324200 +oid sha256:7a275f91651944bb6172cdd22aa0a2b9c586b8676a10057dbd1c70573e691584 +size 322599 diff --git a/images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png b/images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png index 45969e19478b8d3233bbde770a98a98894fc186f..9e2db698b43a49e8dde6b14f5e7a747d1348a40c 100644 --- a/images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png +++ b/images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1519eca34f516ed202806e399540f232dafc8792ad8a782257b07989dddd5dd6 -size 1166882 +oid sha256:5669d6b271911907228df7d5cd065c3c625c6b943239ab8cdbbb0ed15b946931 +size 1404729 diff --git a/images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png b/images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png index f11c9eb9b63539c653cce205c3cfd028b4cc1338..72c113b1df2c719b87e94002372d4bca2bb43c37 100644 --- a/images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png +++ b/images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83146ab8e15bdece1117a28d3fc593ce2afc1b41e70d081ded7f029f7a833a44 -size 331814 +oid sha256:25d2a54bbd022630c8efa4855846ac6fb9489f98c462e0e040347d643e73f94c +size 348537 diff --git a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png index 452c33cc65fb185a87e9874d526a230d25c5bf71..25195d98d82979ec430c5bebf13ef2c050a9e422 100644 --- a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png +++ b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44b2fab4a08f229d6863225401f8d9a9990becf80b6edad3dd52661ec0fc2e5f -size 474803 +oid sha256:e670c0e6976340a21364fb8b86e49fc7e953634d90585ca27ad1735a47c1f36a +size 616218 diff --git 
a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png index 395f995a72c54a6433546433469aa45fc4e3bef9..34ab8037cfd818104414a201d11a3c05e979644b 100644 --- a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png +++ b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ba64cf5c5183af6ccc984688359e4d0b7d3a76a0ec9487c4380c39fd7c55cea -size 715941 +oid sha256:5a59cf56052e5e6769d0548c86b754d8e5cbb461290edc68fcc0ff48a5a48725 +size 529204 diff --git a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png index e15196888fe3fa5198bba12f40b590d7ab080190..61ac56ca7d149772439a5af27f856089b3145240 100644 --- a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png +++ b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7db1dd16b52ba418f4932f7a92bd4fa1be1f89fcc7c2112dda17a232d0c5b9a -size 474797 +oid sha256:2e592f630f8d44636cfda74913501626ecbf0f1abe75e6d8227c33243d682825 +size 645633 diff --git a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png index eb053ea822ece0bd2d72c72dca1bd814c9801ede..f909331cf8ec4bb3080971fa3a780c50a91e1b61 100644 --- a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png +++ b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8821c9ed98713593d417d3e9588ed0cea900cfe3f23098b71a256324988577bc -size 612853 +oid sha256:e9a2097fdcf299383d5f74d390eca220b08032e526a1f56e6589a897db9d3637 +size 624811 diff --git a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png index ad5eadb62e0da117b708bb2323390e34e927c37f..ca25bbdb7bc62db708edc3450858173afd252358 100644 --- a/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png +++ b/images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:652d39cc978481ca4994510cf2b1faff6b2a56fcc9e62ffe213fc07803ed8d28 -size 740955 +oid sha256:812b5ac3ab4e64ec1fbae60dfed8aff0ac92f865e137c5b011b4f53be6570e2f +size 779157 diff --git a/images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png index 6d779e680e96f371685183cad265628394529d01..6a9e52f6ea6c1c1f3fe18bda3e9634534ef9aa5f 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21026bf0d3a229b20f2627057c261e869ddaaf779da019fb829ad29f106b2b7c -size 2616572 +oid sha256:ec166ebd89def5fe7ffa4dcb70dd4c157457093581c5de7b0a8194ce458248fa +size 1936658 diff --git 
a/images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png index 0adb97f5508aec0170552cae89d16118189bd291..9d4ed2b5a5ed33518cf179e0e28fede2f4cdd317 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27124582f0ec6f39542dc1dba986b722a2295a0ca865daad8fc5c461743ec292 -size 1333375 +oid sha256:c24b1bacb93cffa1a967bf42fc4120336d58a1df9aefa8634699f42c943e55c0 +size 1243181 diff --git a/images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png index ee543faf2b0eb7aec963c0cbe40d4fa218a97c64..c25ea1de6d2551db376fa1c7943bf006e5404200 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d7cb0a0f3a01f282d30a57dea3664cb957ec82ed52824f70fc0319acd773c8f -size 1344219 +oid sha256:b27679c5b3eefe6ed7160db6970c32d7fe1956c9bd3691027200c73b3e3ccce4 +size 1415216 diff --git a/images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png index f5595c61090c8b1e61b09715fce1e7d610150478..70e2c65adeb0b75dfb168096f57b1494d26d323f 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef843cf47a920b6da0ca4504ad78e13863b046eb164a15703af15accb0552ced -size 1329337 +oid sha256:fea02c91cbb2209416d22ad5937cd12cb325339a6ca7b11b36db63f0e1a3587f +size 1100668 diff --git a/images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png index fd98a63856218a5816e9005292a22ada29e4b556..a142f43ec29c38805eb19d6bfc74261c9bd55d5b 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4f56430c6785f780af032ad41139e7c4a2e2ef1050abcd7b395b1ab8ee3088d -size 1769427 +oid sha256:d523ebf6005182ddb027a3a7b1ed7807923829d8b9ec553dae7e4460cfc698b6 +size 2108311 diff --git a/images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png b/images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png index 84bf9469b697f716a0b738dd23cdfc2289d6e4b9..380a0d9c9afa0457da68ea7842d10cfc98db1e10 100644 --- a/images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png +++ b/images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fef61e1b71dba7a1d0f132401b61f4cb707eb3a4d4d44668f2557e9c0e69d30a -size 2022022 +oid sha256:69161b6ec2fdff3e6db988ee860dcb3c5dd7ebb8e2ae132b6d5b5a7b3e4e8fdc +size 2224492 diff --git 
a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png index 929855158208d5da5d98e11a26a9e35d67c70262..290b55630669b8eee0097a3f56d7e55bc2b59e3a 100644 --- a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png +++ b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6a6c60024ea341586e396c4cd08bbcfca1459b5f3d964fb5ac5c6227b6959da -size 314397 +oid sha256:f9f43a61060b35244894eb2abd314b78690e6548073a1ee4d54d2f1273974311 +size 312298 diff --git a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png index e788021ed5d0874ce61915a3f85c8941ee312ad3..ea697487d5f7feb9c0585ca7280fcfc0397f44ae 100644 --- a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png +++ b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86268da27b264134d8e7f9e5dcd292b6e27b7235d9e6913ce3841060c3d7b863 -size 1460324 +oid sha256:3cbe621270cbf3b7dba4f3cebf60774a4105e16db1d5795a4d7c98388b738405 +size 929188 diff --git a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png index f83d43fa74d7628cce3912b8aef888d0e4d9995e..bb5efa73b39a3ed062c34a72986351ca16065f0a 100644 --- a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png +++ b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c83ffaa03ce22cbebbfe93bf41648437d552ed8d6c726838044967d1838ce19 -size 323130 +oid sha256:7ca0c136a679df1c8361e788f43626da0c4df9c1dc9c68cdfb25bff6ba149080 +size 318980 diff --git a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png index 43dae95a8ed7bad04ee801c8999715142084d15c..a6bbb52538c75be9f256b4d17f3f7f64b68fe6a4 100644 --- a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png +++ b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f48fa21b6ef15d9b53302f6ba6755d552aba902a379c97182159e1b670221e24 -size 132392 +oid sha256:dc0b40ebb283ee4dac0fbef17b15d82e36e215d0e8c7ea430399eb673da8ceb6 +size 132156 diff --git a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png index 4e49aec5efd1526f321735451dac7bba851d915e..5246bcf5dbac21b3bdc761b9e3ad79a98bdf3b2f 100644 --- a/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png +++ b/images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54506533460b6c6671a5b596508f165eefeea46bc647295311495535d94d7f00 -size 133989 +oid sha256:a290fdcfa4606c34420892f446a2850c2842ac10a8bf24081101dff565d2950c +size 133052 diff --git 
a/images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png b/images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png index 45b6047db02d28ca2e6ebc9758b6d810f50b333a..f9a160c52db1e72f6198e40d011e9340b6e9a79a 100644 --- a/images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png +++ b/images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1c96594fed2956a6144a0df977559935465e7dac16b9645a2cd9eacd43a48ed -size 2088019 +oid sha256:77d1aca40ed0c1f8cd566cc3cc93e0875b4d519f2c3bffb64238bb91b6857430 +size 1704422 diff --git a/images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png b/images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png index 13cb540999dca8f41a9011021a70b2c57d531719..43ead7b7efcd9d641635a1f3d9463ec1ee6c9e47 100644 --- a/images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png +++ b/images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3d393a5970d28825475c960355e2e672e8fa16eb874a730e2c209bd613ffe55 -size 274329 +oid sha256:c33f9c186384219de4310d137d658a127a530f98d9ca127988f25c4a5bf6dd6d +size 297420 diff --git a/images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png b/images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png index 2078b78b5def0acbc40037f91af176043ed8aa11..816f7e7adfa12ead2c00d49b98e172526fae6218 100644 --- a/images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png +++ b/images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54f950e65759d18ce6711ee4c9e3982fe6aeed6386cbe8e2a8175348f33fde79 -size 2099761 +oid sha256:3027d717dc53fc025f69a31739581380357c55a52c78dd435e6f8b9e86bb6693 +size 1747985 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png index 2d431388f7b402f569aa88c93afaeb4f08dbd821..eaddd301a42790f09f780ac1df85e414e10fa410 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc4e64531014e1640fc58eeae95bf3d47a869b0b449a5070c256caeac74d5346 -size 976448 +oid sha256:c8eeed35fe085ee9984b585e1a9d4f546c89a4ccc1db0223222bb69c5f0d5dd5 +size 802476 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png index 61be8f5f2fa2cb48c6f7e2e626b72135534663c8..149249bfb3577f66a52928b99505808716053430 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:292c1d198750503b041dc99f58e4aaf1247bdf798d078e7269a54b029b463201 -size 943584 +oid sha256:044bfa6d34e8b566a8e948061074a76783f0273cd47835bf2845092980b71dba +size 784033 diff --git 
a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png index 34d9c037f2b9fbe65eceedd7108890ab20425727..d79982436219ff704be900ab0cc11c02a645c354 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03f6be66c78271ed63fa95e3c18ef789270aee97311fa361c14baec8b4fe08ea -size 394410 +oid sha256:2e141dd0de0380089a9df4d137549be5ae2dc94ee31f2806a057339edc73526c +size 658062 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png index 17fcf80886e337be0b4a493b85ac3b5cab901e65..22be857a46fa53cc945118e4fb18b491645e8abb 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ecfec3e243ec1e9b435f1746da0cab1c0e2438bd5ae0aba19ecd0313625a87d2 -size 790766 +oid sha256:99c117ea9c37a7a7a2905e14b9b365e237213c256f455efdf7655a4da2a53ed4 +size 296542 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png index 0f4ce1784306a9bbf8f4f5748c4a4b529188da5c..d533a17eb711b0fa8bb11457b30532b79f6e4126 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54c1fdd6ffb3b599b7a1b8d36838cd87a5f1d441047ed0f6d15468e0a3631021 -size 990709 +oid sha256:67b8e304fcbd1a79902139b035ea60d107aa44a586a0c99f2a52146d2e73fee3 +size 1599201 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png index 18ee16d00a7e749788b8fdcc00ef6d5d45a6606a..4954f60bd4546c1cb47859d2b21e3a39d05ae449 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f1619e713a0c71f542de0c61d293f590c61ccdf0cc377573420201cc87ba48e -size 928326 +oid sha256:09fc9ff62543e958e5436999b883f59ba826b90ce54d3bab0a6e23245ac761ac +size 1451435 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png index e9cfd4b928d8b798abbd26cc82831a0424aacb59..5d6c60852fef4cbbb8926d5329b30d30811386b8 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e04118bf7f782a70a39cc7330cb7d2f2660c064f282935026bc112de2d55de7d -size 394077 +oid sha256:8044af5e42b1de7e0fa5e2b5499411a4983cd410ea994ddc75abcb060393f707 +size 713597 diff --git 
a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png index dc76dd0a0924f4fb393e05d471756815829f6968..a18094a93a4951bd8240d4eeb79f10f1daa86339 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d696df19218f37312229ff97aa93a77e580573c1e0d6493707f3ec941ba362bd -size 929191 +oid sha256:2014db2812ed8800fb5e50455f8a5fbce3a8471127da1ea8f5b9216e467edcca +size 1185127 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png index a90da9d871c2b07e6bb3a661026b03dfcb9a380d..331d08e1af9fc5ba61deb8f681c01c335d3bc35e 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc43c3a2930498cf0df10f81b77d3bd1b0e85773c64be59c96ae7e2b9ba056cf -size 974543 +oid sha256:7b1f4c5f53d3ad380436cdf2253f8a58acfc64da04021a6305144e98e8b98846 +size 1437085 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png index 563982a622a5a2e9464f97ae86bbce06d4a849eb..d730c43f5a1c574ba005846dabd5e3ca718ff810 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96a7b9999ae4a40ed6cfa87c8d1c2b1f0960f296ecd76bbfa964467e9f516102 -size 871998 +oid sha256:02977634f298649409cf039746aec07ed4284627f9c4c5a69241c133f78d4c02 +size 715543 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png index 4b2de80664abbdd0e444c2251365dc6557b210e8..72259ca4fffe57df5b3c2134040cb1c1cb4672ac 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e5ffbf530250c74f7d2f21765079d0b0e10b437b965898085796168a4d38981 -size 422531 +oid sha256:e828b4013f27752c54f23e19331131d27932e8ccc8a07118685591dbd4f31090 +size 746125 diff --git a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png index 3c6a2efedca03de6a079c91e639266dbd434fb3c..e6f448566e738ccd3420bc191997cb4ab8fb03a8 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49b537c5d1da21c76cd2a1918be0560d682ff45f3ad6050b00ba7d23de468339 -size 988666 +oid sha256:5c1cc4a9e2ebe4aa05b2dec3ce04ede252b9eacdd80a2149f206f2147bff4a1f +size 1380453 diff --git 
a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png index 39470277e04dd0bc0a538f09417dd50a7c7f1629..0988d813d925643b12478552246c2419b041da9a 100644 --- a/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png +++ b/images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39ca81c8a144534e7ef23cad908ad6c6c66e1796b99c7bf67e93486372d524b2 -size 988591 +oid sha256:821bbdff6c3696f23baa7d27e259f303a21f292d798683a9b0d69b28062bffd5 +size 1360718 diff --git a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png index dd9e26addc75fd593e4649b2868792a3e01fc9a9..f582734ecb69efc3ce03934a6db2d059209f4317 100644 --- a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png +++ b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a07ce22d0df08ec36c0fbae7dd19414d99876081b5b88c286391c535c495fb1 -size 3529928 +oid sha256:228b3b34299674c423ff60bbc9365f1b20ba80d9ce56aca3bb21452abc1e898f +size 2688241 diff --git a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png index 9d187fa05ab6a8f77ff9a0e590fd6b2af21d8f8b..0fd879a3e8a5757596c430ea20a0c6d83aa73101 100644 --- a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png +++ b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbc9a05922dc145f7f60839607573587bea7cec076fd08fa074bc2cf5c7963a7 -size 653111 +oid sha256:55a66549d7bd3b551cb0c5f1f7bc327b893cec6dfbea7a75b6759365d7f68085 +size 713735 diff --git a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png index 2834de9db96016ae13ecfdd46127b075e32465a7..175249358941ba4d9579c5b968d246369104c55c 100644 --- a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png +++ b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5124bb8e508caf11b8dfac390cee2027a55c9e1f1b7ee500db89dc945c5c1ca6 -size 779702 +oid sha256:eff2f88a8e83c148a86363a552725a1399fc015dab87cb8284d135762a428be9 +size 271427 diff --git a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png index c48e52ae1f4cacff71ae9086c19c84097b794162..00a085c96be79d4a7ee86d8c7ff444954bbcbbd7 100644 --- a/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png +++ b/images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9266fdb0d6268a0c1979b291a411f6782eeb07c8fc7180ffa0d08309f95262cf -size 3543736 +oid sha256:1e44dfa37da0c2efb12ac4805c64fdf61cd040b32b5fa2831dfddabfffc27ec7 +size 1690980 diff --git 
a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png index 1b03bb7a4984928963af88a2fca68846284b226f..736b16020d27241f5d4e9c7bd3d6de23552e9d96 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:116f95c997e8f11e26d4efcc0048b1648eedec8bad72f91f23a5d985da76fe3c -size 1111252 +oid sha256:24f4384849c1014243b1ff1435176f9f0eb0bec5d47cd6fe7b56bb0ef1859adc +size 1407523 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png index b3d49ab4f319bb6801c88e1952f75278d7099632..d673072143b9060e04ac72a340a92709f46d64ff 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f6ce8fc132649c2ad4312f07c4da2f581cd7ce3578c70ff4732adff5ee423f4 -size 1020388 +oid sha256:d774dcf801d9888ab0fb2610071adfc87dec87622c54343d0f889875fd68972c +size 1394476 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png index 007c998f26ce8d6f6400bbd58cfd36bc791ff890..570052433b7759d2fc5174969acb8e9925cc51c7 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d61f7a469d75173beb179edef506d8132eb8b0cabda29797e06a6e1b325b6084 -size 730878 +oid sha256:f404dad690b3ff34e07959051e2b1967af9080f87cb22f551e44f6a2f96b6735 +size 672804 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png index b278f69c43a87e841ce633592a66677219445715..dea8b18b21301315a5a6e24eb807998ce7e86856 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1ad2cb0f2878ca1a431f563a7304f368638cc6fe34c274b1cc87d0f39e92475 -size 423962 +oid sha256:120332ecb803747ed51568d2bb01f26da6f8f20e57587db18be87b6d442126f8 +size 549446 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png index 05e6da295d25db4ca695fb14774d0f7354aa8bea..ce4d16cd8a71b6635b26ff9de74c44aee556b569 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0328d1453e3c0416a77e319b1092df7f7bc605086fb9508ba0e53b2a88e226b -size 1408195 +oid sha256:ef99a648adcdbf9deb4820b83953f29244ddb2ddd4f712fb50a8ad1a7b018e02 +size 1585297 diff --git 
a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png index 41e1c5f2b35a613bca050986a081b2dbb1663c6b..1f8505053d5d54df698182084a3e117abb62daed 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c61449fabc7f74857fcf4cdda722d3827d47d948590e981aa9671055df9f6508 -size 1324667 +oid sha256:eff2f0da39f9a9cfbc01b89163db7c05faa6133d0e90a060ee09c18ff8795877 +size 1503477 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png index 60e1f456c4330e263de3374908f8fde126afdf27..c7d9ff0f8ddcf3787b8810b7a85b4a8b6366b524 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0956b172f3f1a51c4d0f8363cfb37dff1b15760662c8b0bd2f5d47d3a053f7cf -size 323708 +oid sha256:5b3b07e8e17463792750cac1b7c0b12b2b59a7831ec41e88bdc1ce8809b55a7e +size 199516 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png index f5589204e90b14513a1cc57790eb4c1b7c8f53d7..a47166861577d23f99ab17529326bb01a2426793 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2545c48bcf107c2361e2d6887ee29446011f14d21740e60133991c34bce4aab -size 417310 +oid sha256:2c9b8fd635c0b29254bf5a35ca5c78c37183d1d3e48ce6b5391fad8b61743cc6 +size 317502 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png index 7f1282d29374df126e04a5f444edc976676b4338..7ca8d4ea874cab32ccee8f23c8dbf5f675b8149e 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5c957a02610eced4917c4cc5e97127f91f00c55d253e60db09b176ae87c6280 -size 818319 +oid sha256:1c321217720ce0b6367938cc8c38dab0b871572c898fa8a60799646f76b2846e +size 560451 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png index b17906b949c68582c72e9b60b409bb9a6722cd38..7e633a00d18b8496e8198f0709773187306bdc8d 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b915c5600c949257d89ba2fdbad698b0ceff6c74f683f018963161b5f6a53ec -size 606078 +oid sha256:af23a935c366085e4e88d9bc08d700cb705e53d0dfe666ac3721fe2287788bbf +size 513900 diff --git 
a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png index 96c38d0101a852c4034094843e52f467371debe6..66548221046c4c5868f79ef506aee4b77e5190c0 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6867560e62dbd7c000ec6bd173d5c893af61328229a8b86fbd85cf1edab5be3 -size 320313 +oid sha256:9c35a4932726a956c5b4000c7aa02160c686ecdd41ac11c3690af65b375d67d0 +size 324347 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png index fbeb7e2a4891e59b5447d12012bf435b2d01fff5..da8a400a8e46436a58dd08854d1880a7b3bfbb43 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72be5f08876542769fe6bc06f8b22432684206ea38521d3ba2f4945a45f11800 -size 1265925 +oid sha256:abdb2adb4bb0eb90ed565794fc631797955c3a74470cef36583a403b5571cdc8 +size 949362 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png index 8bd6163b727fe2ff6524f1dcc7fdb99c24f7ea2c..8c76e3cc113672be9136db1e49270dd33bfe80e1 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e38908e020aefb76e63aa58777232f85279901426eb7b2614e93645818de2e39 -size 288766 +oid sha256:6b0b17df51de6a9210a2f89414a40121f2cc565afa5957325b14a6aa97309cda +size 230475 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png index 66ca89a505de62ef1fc227440b77c3149e370569..f01d644b2635c84e68c5b31c18394aa010317427 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a51347afb417e8125e7169adb1f4fd178190f4cbc0018f3aaba72a165f9dddf -size 504779 +oid sha256:b74b2905b201c00f0153577beeecdac2fed03ddbe2b046ac5a104fe957636f3c +size 352086 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png index f3ba9869bc7c3d8dbf0c24b3644263124880f320..242375328504c518f219944b2cb9c8850ca8850a 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfb10adf8d33cd8b8ea582b3f4dbe1c6714e5a691de18b3233f5d77aa758d8ad -size 1133573 +oid sha256:42d6637312d6a954ea5ccab800fc7d81acfea71e2609a83baede6b87cedaafba +size 1619552 diff --git 
a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png index fecd94a3bd8d9d4116d1dde8b807797a26ba17f9..4e8e10b8982f2a317d9eada9e09a95855fd8b5ae 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9c684cab198a33bfe05275d099ac47b99e382bd4cf19603a34069c56cd6da5c -size 562472 +oid sha256:399d5186927e07ba289cf3c4ed2af6923569baeffabe4b86750f970294eeb56c +size 665139 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png index af13cf1c1233a7713afec7ba61bbdc46a2bd5863..de138c5599a14c4a2c4cb0e7c5dfc09ba17d4872 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41dcbcbe18ad72b5ff53a95a211161d041091369f48e6a0b965d3d7fdb4c2948 -size 489997 +oid sha256:7b518f33c79c8a228547344e15dd54d1c82a2c6efe8439d800da19d62054d329 +size 621477 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png index 3b6a03cbf60fb71f5b1eec1df32bff58d7e3c35a..6b74294ec85160a78d7a324521894deadce50a81 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ada440b2918d4a2291faaf42a3b2a4696199b3a24f20b525baa61c7943b7060 -size 321615 +oid sha256:8f763aa5dd52f13ecf84befb23a80085c2c3004b4fe94d9a12a5da5ca660a3c6 +size 220806 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png index f02cc4df02e74186e0d385b378f82b037027270b..ae8c47293e59ffb734168d257adc4098808d4605 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2796223576853fe7dba3063b4f57f31cc24c499b5a160e4c43f2f1545b234cec -size 834604 +oid sha256:4c1ba4559924bbedf183a2ced16c3e4635cdbc95a64d3393514b5b1bb6cca8d1 +size 288854 diff --git a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png index b5e86ccebc463ab809a69d648c73f703b6603d38..d768180144838c3b5a7d0bc328d6bbc52fd0c540 100644 --- a/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png +++ b/images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a8d3ef002b754041a50ae4405e80fc97f9ee0faf345427eac28c95fab43b478 -size 1042917 +oid sha256:516b20c9ab238652040e48819c8dc6fe54b0549da3b53577b2aebd6bc934d151 +size 1417012 diff --git 
a/images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png index 33d0201a9a7b86e08fb6005d658f3af24cbbf6b7..047b9f6b626c4f91f153fd85eafb955d804dc74e 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4af30d84cb1d167b75c261ae66507ce1531f36fc2306609df99f4c92cda0d33d -size 1921084 +oid sha256:31512caf1eebc4753b328544aa1f6b8586f4b48a1c2e4d7d669f751c95591313 +size 1974162 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png index 383f821aaafc2a744c1637561bf4d9af7ca518ad..d03cd9b57fd12b35943ed76c4ebc95037300fb72 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46fb14d86ca5262e4b1db88db24738ed511be28c4b7137b1078a0352e5cd5fdd -size 1867214 +oid sha256:a7c0b65f6d4dcf70552eeeb1623cdc08648996b450a21f8c754d4c8e6fc7df3d +size 1258299 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png index d9a5cb193ff9f061c67e2772c853368e18ffbbcd..79cafe7da235311d3b5c65e27d8e77041b990ab6 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a5ebbbefba98a4ebf2f725013b323d703250193dba0678cecc97866c66bf8df -size 919507 +oid sha256:753041211c685f6a5628ce889b094523e9ae3293f825577f8fb71e10fea2c426 +size 1133653 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png index 2bee9686a8f679505c8f9ca11e2f2724c6b9facb..35a5c0a52db52e5ba7d9f194c4959438b1c9fb03 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a468bb8bcba0e0870663822f788077784f8b2e78b0ab21dc9b5f20e995f1caf -size 1683898 +oid sha256:c8d63a029f335921caeb41240341f8a4d8b2676edc21bfdccebd75fcfb2c9adc +size 1774927 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png index ae960e4d6d0b9d5ca702ae09c4e1bd6022c028ee..ccb087d9c024f2862b009fc3a70b718db9866a29 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0802be706ba1a1fd789bc5b818185265f51bfd975ec686d38ff049ca9998c0d9 -size 928146 +oid sha256:c4791eab538dcf8e9ca26eaf1529e3d1505b206be08460d26e667a3af010a0a0 +size 1096826 diff --git 
a/images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png index e943398800cfe81f56e3352e3be636bef351e68e..e5f3b0276f9c14e8a920b4d52cc4d6425541bb85 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:843e3613c3d8175ed7cfb84118bb4729f1a1f0d54b9c15795e3bffdded2ff757 -size 1683112 +oid sha256:c61c7655fed4c6ca0dddc9458c7a334ab218c606d024f6cc7db2504a323f845c +size 1772064 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png index 9428bd0be1b90d6d85492d04cf9f7f738ffce2d0..23aed2ccac8e3e0f22614bd3049a665a2a49a5ef 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c762fb9c741d1c34098a1b4824921f33010b5fe1842f9a44a5a9d253b445e40a -size 1001565 +oid sha256:1ced4a1a364384101a8dd06db3aa607d4c941395458d3bdfe46e45cd21800159 +size 847615 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png index 823fef94cf2dfd07c5aa04415cd45e944c9e81d6..580be8212611f7124af894d2522433fb58b643cb 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc6832645c8cb4cf528144d2756c13298fd4203db2c2ac30a7ecf23ce15fc5c8 -size 1061401 +oid sha256:9e24fb5c961b947f81de27ab732587ceb810b4f71c755bcfd492cf9a7a341b1b +size 1144096 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png index 276fe5d9365116637ca6849ec7e71a05905fa4cc..e58f35a1115edc7deda3c2a1254a34479316b476 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcb152df2be8f5b9f5aaefafe1889669b4a2dd8848e15d677f2f852c5314099b -size 1590842 +oid sha256:1f6593de4e67fc32ae67d457ea57e0d8b03ed0322027f66fb984608f4f560ba2 +size 1330608 diff --git a/images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png index a72173db95bc5ecd1e2cc4aeea8fd5283636c38a..133862d1c6bb8cabdff89fc625cfe302ef1155d0 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:168cea925eeb761b56c937c2f130aa141c43a3d51f46d57072df1a4c8ac14433 -size 1368599 +oid sha256:049b4385ed5a1847eebdee99ee9e5423b2d815ff0d54865dea7f546765d3f7c3 +size 1260682 diff --git 
a/images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png b/images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png index 406ad96f2d3a95e4735ec882a2e1327774a9e15c..14232f8d88570fb8e65c7b070a4ee6891c5173eb 100644 --- a/images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png +++ b/images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bab5072c112669a0781abdf6921145cb662974d41b1d9fbd734ff5e6c23376d9 -size 1069707 +oid sha256:4a69cf9da272d20f6f21c6be450addec2f8070e05c845cf52f150a1e591f3b12 +size 1032494 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png index fc6aa68ba717931670fd7599c771bf4040518136..353a182ad24f3c966ffa0a313636a83be06ecabd 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c5c20600388e3f3ee75f8b517f93a32e331fa99e6dbb0d43b195e49cc03aa70 -size 507874 +oid sha256:9d33f8ea4f566d99c85b8646f83e304b9f8c40273369be580f68a52d7e688223 +size 428245 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png index 5b2eb8c14347352bf02e94906869c34f04137c07..39ba564631993eb6734aee2f81a5e2ecbde0626b 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08a57c464095e2e43c81e66de8963d8d9c279bf0a11bba62d9805532cb8e7da3 -size 524701 +oid sha256:02ed9b3d595cea075907603d822b42deb505815ce6807b9fbc9d14633088ed32 +size 454968 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png index 4598e74cd1fb14d77e38bef2f16ee29f3a4c6e17..78157c7ce4954b53d6a00dad38bff3acfeed0aee 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6215ba4badf0b6022dba87bed2bffdb6ea4ceb17c9a99df6e1df955299c83e68 -size 431944 +oid sha256:8b9e9c8578e2b98f50fdcfeabbc4858a53dab49d59b535434035b51b5160c099 +size 327811 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png index 3d135105aad5673239638c571c038a51a063ac60..e08c1eaa583fbb9462f8a2b26be3e11097ab8943 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2542997d4cb802ad748a4f66ea81c20c9c404a9002f2a3f9184f6745de15ed5 -size 532054 +oid sha256:a052e8130e917bff2d106518f558f772e9e5416a7a61ac503299813fb4526618 +size 533156 diff --git 
a/images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png index 570d7f84eaef94c0b4a8835b7e1c991c13ea1b94..a1f3001904d8226ad98c51389397bae9b09da7ee 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b980847267d2f397a5e1264e491cddbf89045c05ec819706183c723c53bdea56 -size 531502 +oid sha256:7d3d3c3e50c6f71a2084bbd20c499be72fdbedfac593c7ad04a6b02412947a2d +size 436685 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png index 50db6d741bf5959f1354581f7e13af30362d5590..45aab78a548c3c8b8c277b51209a11f88c5bc7a6 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dee6dd9d76752289a53c416ac470fa6642ba1d7d581344c12e2c4b5015170139 -size 504863 +oid sha256:e339c812cd8f2f8469e94e49e62364cf38bfff8422fd519fe99babf17ceed3a9 +size 532364 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png index 380066ab004075459a549221370f21f9f20de42b..d3164607ee3d6a2c5a10ecb8a36bfc6b3d579297 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79dedc8fe96da7c126fe4fd96d987514216094742ae59e58570360822fca8e8b -size 430336 +oid sha256:8d2d4841adb376a74d72418e398865d6e987f1c05e0daae63240a898a2e2b63b +size 241748 diff --git a/images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png b/images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png index 4b443ff4fc7e47e278ae775f14e39c24e7787be5..3b59f77e5819635c18fc8d9ca7a2589a037267d8 100644 --- a/images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png +++ b/images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b96f6d170a8deb39e8756ddc7851fef12501386e7201eeb7001d67fc6b4bc15 -size 505544 +oid sha256:dff550090aa763db1ffe1df6563016fc4504b152184ec02b8d1d8426419ad855 +size 513114 diff --git a/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png b/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png index 350ef0a8e1d49ae76107912d6aa1ed42ad84f2fb..43f6d70822b3925a7130c9f392725d9d66b9efa3 100644 --- a/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png +++ b/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bc8d54aa997c7380a9b862456a3549acde5f2c50a198aea48cc33cd573ae00d -size 4131388 +oid sha256:072ebf49ac978be33802587d5146d586396bd8738590b2432e5172a59d823af7 +size 1297269 diff --git 
a/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png b/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png index 513a9aed2d648ff19ca5cfe108d23cfdbeac4ecc..362d716fe1e1b04a57a7a580dba082a30767c391 100644 --- a/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png +++ b/images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2c215618c73270f55b1020d793214a49632a441566079888faa19b97d32f3e0 -size 2082112 +oid sha256:50d2120fd2bcb894c1e69817eb77e467258f92048a1da65540fd3fa91951fa4e +size 661757 diff --git a/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png b/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png index 4bcb5ec26a43bf8d16dc7ef52f8486eb7e066ed3..fcd6da25554890e4dbd2bd6b00f218381d661856 100644 --- a/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png +++ b/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b5542663bc011cf7645ac1d547ce1a7be03f5b452a6f3e3ea3292045bf6a93c -size 1735072 +oid sha256:ef5354d6e9e8857c110f0750af49d87320b4ff7e9b479e8613fa083c327d7567 +size 1434701 diff --git a/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png b/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png index 25ca56859f2b99eaf9cbf77f49581849f0161bde..129e00f42fbf6e137deec034b2e4bb407d76ab96 100644 --- a/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png +++ b/images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2237c774c8c25285cef973550830a293ae9465b39b08c6c64cf44c03c00646e2 -size 1969117 +oid sha256:e001c7ff309d7c9b3517ff0d45b0bd47aa569debb123e97a924f3dff8a7d50ba +size 1391970 diff --git a/images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png b/images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png index 02625bd82aabfae398d0adb425dfc0e4f75b0944..20382844efb73799aa9b2c845ec6ccc5960801a6 100644 --- a/images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png +++ b/images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a18eb33ffaf2d2a4367334fd9f36ce7a0d6b66d09fd9dd417e57cc0e95f87ce9 -size 609304 +oid sha256:306becf9859f6efa444fbbbdac8b9aab12d7bf3a4c426f36e953289e49838d7e +size 491648 diff --git a/images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png b/images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png index edd1c372348cb7671a8360c9274cd550a3b79598..678dca06cf6451d0886e8d2383a111b5f0a0c94a 100644 --- a/images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png +++ b/images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2121575be14de0b7b125d99463dd9aeb71c951b6a23c3f4d610ced0db9cd9ae -size 789307 +oid sha256:cfd6912cdfe9700e7f5181b14cf9a41407197f71e110cf61a9c953c1df773991 +size 815494 diff --git 
a/images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png b/images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png index fd9c1482182e6c05f1c99bd09f6ab21b307056bf..a3c8f0de638d5b776d38e8bc6d9f599967c630d8 100644 --- a/images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png +++ b/images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4f17b1d293fda418daafcba1fb9df60c95da5fcbc925b37e3ced3e0f2124e87 -size 590256 +oid sha256:0353793d60e5b1a3b44a510e870c8d1da1089f00ade9b4a294d4e77a058473a8 +size 438156 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png index 44613cefd2e78e89416c37abec753510fc87a137..a2583ceee89711934485f4a1220a6babfb5c97f3 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76e14da131b0463a7c743520639b1935f4dd53392f4500a5a13282a84b0c5942 -size 801443 +oid sha256:7d60f32971fadf4d27c1ca696f074f3d66a46de871fec9a75c8d67a7261a00d6 +size 1021291 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png index 8e595a4437f2f1095a2f7d38a673895b9440432e..aea79a9cf7c552d3b9146a065b3925661f4ca450 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1812b473627103bff5c02454cd46ae1a7435c740f227b23a7684dcd7b9e53c8 -size 1059312 +oid sha256:efe72212dacd54f30de88cef2349993d2fd69198df3d2fbed96bb67ec3817698 +size 862413 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png index ea6e0ac4e7948b545e8e389882966530cfa49129..e745f629c5e76c569f0b96c25e317b4fe13465c8 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11c2bb75093e4c665921d30662bca112f06d18a7dc2c4d412abf7b631ac31cd9 -size 1439304 +oid sha256:be31525b804d8d21992388e9e3d4e5c5fab7395b66a032aaf85df3ffa174252b +size 1111166 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png index 9456f1469b8b7a490445c9722488d2594ed06772..117843b9454de46e8c660805a7ab9fdbe6d80206 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bc3cd887c391a41acae1764adda9121f8d9b476c24df182e172378534c0b1a3 -size 1154419 +oid sha256:d11be9eab0de3aa38301f10177221a65ecb0e93923926cc14ce1665fe0ddae02 +size 847019 diff --git 
a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png index 0d92db8ba5037b79ab88866c294727d3bb8c570e..758f6afdfd8c3e350ebfd77372f9c12e4f136334 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a6260becee0721c8955311a806bdb8d1b0383aa0efbea9be4f5ae3def375397 -size 749224 +oid sha256:02b6e7a0f5fc4a1a7a804da57e8e77f18827901aacd84a24209cd224eec5076a +size 963467 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png index c2f0cce0a2dadcc180514afb90c080b6c0f1a53a..5665b58af93554f27be5ec79ee2d81927d8cca3e 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d265fe89a62464cf4922fe3b00b7ddb1feace6116a327fdffb4f626d60437c20 -size 1238950 +oid sha256:627e20a0a5c0871a385d851ed76987272bbb5693cab6db8722eb7dd476c91eab +size 1306591 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png index 763920de8850e4c46be9517fea84aa300325d397..08128293d53f14d297cc3f408c3e70fcc28c6211 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bab488ceef26ad1abd22dfa8338cd266f0ab3348e3abe95f33e01cf1b160ec0f -size 981679 +oid sha256:ff399786ee267152764ddd71e53ec6c99f5dfac95e58b8716fc41e0042ab7379 +size 1250701 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png index 6473b279c2e329d5e7a64a4b1c9b77827447b4b7..1133fb86db4c2364563f077300977a143f8b5ecd 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f77732ebdf1cc7d915301e648b02b2343c72afcde85ca4115239d3c6e72e641 -size 868953 +oid sha256:3a87343ee59673890b571854c7ff5f58a31e6de8a332bebcd30c0ca9c0d8e06e +size 630788 diff --git a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png index 915d8ea081191263c7fe368480dbc6c60235331a..9a9a975a4dfd6fd7dad8f400b51f3890223962dd 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7787d3cda1957b8792fb0081b2ee04602df22c43f1be6cde726bbc661b5c21b -size 556559 +oid sha256:4e6112bb2833348b771c935b11406c9b0ae02346498fde4543e5b6b501013ab4 +size 443351 diff --git 
a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png index cdc0bc4315bd665a118d9d3d344c7e166adf92a0..9db28f4c9f9ce89549bcc5b0318c3ba8014f3c3a 100644 --- a/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png +++ b/images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5117dd5e99afc22b658862a4d041878eeb90cc32230967f8142fe91970c198d -size 749295 +oid sha256:39f22af16276fc778cd61513d6d26c4c519880fce49e212700e1a5e9e4b6785e +size 1031321 diff --git a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png index a4c52fe1911898d7ce5f2213a0def87ee22a5780..f5316938f1d15d91aa190c78d0008c107cd7b923 100644 --- a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png +++ b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7022aebd5441f2bd58b2722ab96282c57d0cf3ff25655d1c75cfd6cac18ef80 -size 1174082 +oid sha256:e655dfc0b38a861340c60d347fb6137ba6ed0d6df93a28920de63ddab08d2100 +size 1199826 diff --git a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png index 4a106f4d21bebaecaf3737905593dc87c6044a34..f29626ea6256f2995892e6c6a11f56fb8a410ec7 100644 --- a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png +++ b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52539673cca0c81d3106bc66de36cc843f66bdecb10eeb8f136f7d2b95eb47d7 -size 1315109 +oid sha256:8b57800017d3eabcf2806de80add9cd2c2b0ec630ba1aed366d70ab4d8c92eb8 +size 1724538 diff --git a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png index 6fa06f49762497fd50c1e8532e25efdca26de9b6..de86a2be77d82929e53327bef9a37da3a046429c 100644 --- a/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png +++ b/images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08791f79db3a73f43f95a8c8510d8ce8ac7dd4783c0dd91f12150d9066e0972f -size 1881848 +oid sha256:2aba4312155994bcece5c68261b2421281e7f34a459b570ccc0e28720cb9d177 +size 856009 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png index 3059f6a916a33ce9725cae826e41e848d8b4a15b..c87c8488aae22d6cb5855a6b7b5df0809f8feb33 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02ed19579ec344ac6d6b28ffe70c225f69bf6cc5b65f3a3a33da98bd3588ede0 -size 606891 +oid sha256:fdd36795fc1c38b71f4962145a19a63219d9b4f021e425d9f767509ae4584cec +size 448643 diff --git 
a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png index 90a90b4365584b23e779d365cd56c365bf16e3e7..1d26a7d6c5b498b6b92c7e2d2bf0273c38590632 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d255f89aeea3d6e96809894563d64c7058dde6930f1ae7ff0eb77d88ebcca97 -size 461635 +oid sha256:9b0190aa391916e89bb88e223c21b925ffca6d8ea1ee4b82b66507ef2babe27a +size 608235 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png index 806c62b3bb7ca9b5152e1878267aa5d40bc213e0..4d89a2697053acf2dd84fa36d7953782d39b13f1 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7b5ffd436e064a772d7c732410385f1c93b7dcb26162a9f9ff39185ca3775f7 -size 694272 +oid sha256:cd98de3db7af27e6d70ec0b0a75543e2c478a48c5bb3444ec7f5b06b6545bb02 +size 961817 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png index 3b57cf49f0c66cb693a88b629e46627b0160cb49..cee46efe60ae23f464f9eab79f4bb7cafe473284 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94bb22025d0f315bb26a34cf827248df83aa7694a19b8ff21340aea28cbb3162 -size 2118705 +oid sha256:17fcb4f64ce973493dae9d9fd88f138eab042f5d9f0050ec54f23864d5e3749a +size 791916 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png index fd2874995dc52fb8006d5bfc8d1d3663b56bb860..03c673d6483e289d5a0cad81360e960f8e20ff89 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42a395e26b28a3a0256d9075ef5d902f445810779c60bee4d904407b2acb1c79 -size 670493 +oid sha256:21f0cd2377bb419b0aad84b851e720477a073a7946a9600976e5a34eca67f7b0 +size 900409 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png index cae99cc35a994b69afe0664f68235b7c1a33f631..66e90dc681214f603e74043830c1dabc06e8ba78 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d15f75143518056c41844dde96526b341d1b49ef310f13bd51a33b745e770b1d -size 1027942 +oid sha256:1b9116e7ffa07c2a79635396c6f8873e13a9332b4ac520dc76cd394d8bb4e7d6 +size 1543303 diff --git 
a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png index 4917d0c96e1ed4eea2abc503c5c8d5b19ea84d12..b54707db45a6d0a6fe3b81f23c4d1ce6d8ddbf67 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75fce2c1d05f052354039fb09574e1e7f5601bee91fe48a1d73fc11b732b0db3 -size 366704 +oid sha256:cc936b93112a036eb8d688f07eed34bb14805a15ea63fc82356a89ed617dc15a +size 270531 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png index 0cbb5aba2033d7fa51b42402b915dbc34c0dc87c..f3bd0e9c3d219c952d73a7a8a5923736736f3442 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6a3cd0e296a7e29da3fbb10491ba72f2d73f99c9f2886df104d23ec09bb0206 -size 572937 +oid sha256:e23007577d8d4dcb8b3a23a8217c63205f530e5f66f1d3353fa8bff3a7fd7ffc +size 588771 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png index 53d050e96e130e0d4dfa713b188d0afe551394cb..316a73b14675aa9eaf08523d601dad49f299b37e 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d53c8e382b2e9e5c3472051abbae3b92a4f02da54a43e66456781d21fb07d222 -size 667050 +oid sha256:4b8699d9577579ffb6dcaba8cfbb1d29ac59eeb125e85f7a6522778b46f1539e +size 895230 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png index 780b6e0abab0882df64b6bd194e8f752bfa4b1be..c849ae8bc860a9703f19169c0f0b7b127c3310c0 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0ef918c52cf35cd44d89ebb2115ed0376235977ae043233832149ef636efe65 -size 636011 +oid sha256:b76f231448c65bcae6a16237c512cf5cf99ce69e2bdc65f748719b95911c2a0b +size 601397 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png index 3f2eb1a52e3af957ee1207ecffd29935e7a87664..65f78431d256769c6c6b96527c359e05adf5ae31 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3fba0bc406ee31b578a04f1016f08c052560e5955d89a1fc10f1bb183d1f4fc -size 1091823 +oid sha256:bb8e491f990e6716423a0a80c2509901cc6c95ca1209e5f51fa99e6767fdc23f +size 1742583 diff --git 
a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png index b25fa1a3bc9aa3c44c6302ed0fd74c2ae7611b3d..ce221d20c8f8055cc1f56056535c3ac33cd6c97f 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c05fddf7474c53076b819fe7903799bc7cf9463501f6218950c58e1025e58f7 -size 673538 +oid sha256:362e5980c0b78f9f8974a598ea8731e566500e0d084d0f41f702c43ad261acca +size 603282 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png index 206c04678f34dd3b4a26eb14cadc7de8036e9171..f5efc4d3ef4aeae6ddc373085fb7a35d27b4b240 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8dabe2295e6256cb8c48230a2f980017f20d0060aaa0a075cfe020b766cb39a2 -size 728197 +oid sha256:5a4576a749f77209ec2a533034e05856c85b3ab1442e160497d3548afb9ae57e +size 586324 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png index 361cc4a5da54c42ddc9e27f2875f7005b071a244..36037684f92cd2f7555f135edc86b67c17b463c6 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bb94f0dc2fcd6211f0eff24b5e95dc867f296c61f021759fb4d5f557fbbfdef -size 621174 +oid sha256:8e60a445e739d677e57c2e0809c3aef97e4b0f55c7b69acc3b853ca3c3f01530 +size 816546 diff --git a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png index 18a837a0b76f9f019c23fb15fe22d3a82d4d5578..eb8238cfb3604d1c407f9244d795e9090adcb654 100644 --- a/images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png +++ b/images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:231a810a33e63b239c63c0c42b7f7eb4ff87efb2bf188fa5cf78da2f5ad6b9be -size 658402 +oid sha256:749ae6fa71ba15301868009c5f19bac856522aaf934efa6af8f6d10d127415f8 +size 900127 diff --git a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png index 5c3d38142799e1a06ddd925ccfbd8d954cf42a7c..b86de1772d3785cde076bffb65b24345ee30969b 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4de85523cadf4ae935aea41807389224a600fa5d2e052e55352b3060726b7e0 -size 1529166 +oid sha256:5ad427d1fa0d0f7888c1a3cd85b14a74f4565687b1463ed12d22735becf48735 +size 1491418 diff --git 
a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png index e854e8450c6997186192dfe6eecb457a99c9db7d..0de643750448e5dba1ba0f006a79781dbd7a8f0d 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2c1f03ad0395e1060600e60b33642efc99b46a7bc15b7db2ac4aad5f337ecf1 -size 1682886 +oid sha256:4c68adbc588559fc009dc9added6157317356b6615a964286dd52b42c28ee28a +size 1687193 diff --git a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png index d4762202489598554a0b1b52a5034c182f12e6e7..1b7b5896b67eda06cf32d6a99a08f799d7fe481f 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:101c76ff981aae41d2acc19c97e5d6b343dc948917ebfed350dd9712265f1a22 -size 1635171 +oid sha256:0df079cd53194226d5e33c92fe1f994bcbdadd887c6f8e5dd389b1cca0668bcd +size 1671891 diff --git a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png index 1b80be13e04dedf97b8ef0098ea0b71feeca6fd8..aea579942159cd09deeee3849191c74fc8d3b44c 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc18dccbbb2c9d3a839d72d06a4e28e1ca2e71a0126a1a9ab2a14f69d922e058 -size 1484425 +oid sha256:3363ef42d39774e048f401c52a30edeee3840fe01d21edd0a89a7f65bf05fa87 +size 1488053 diff --git a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png index 92bfa7adfc6faa8431d4a2fe55754854f2aa7341..ae83039c573e76e593f34c4c3376c4f984931522 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a13929d755e74c36ab189fd26350bd983d61e210f5a5eecb2d63f16321e521c -size 1719482 +oid sha256:b838041c9be79b78e45750a6659a4c426c2d5cae4c6f218ba6cbe540eddb810d +size 1181338 diff --git a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png index 77cdcad3c7bdb5474ef4a5b1531af1fea5014ec7..801bc8595e34c6ccc4cb7394251789888b8f4151 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:136c54567568ca1b209845b034daf8a821488320de04a995f3c2b19467757828 -size 1598497 +oid sha256:73087beeb7a44b36d56ae5b6516062084eb4a181abf98fc3a55bcc437287bf3f +size 1126611 diff --git 
a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png index 327fc6ab80281244f248e5bf1fd0b32ae2b6e77f..194d5a7511acfc61379169e2fd5dc18a77ae5d3a 100644 --- a/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png +++ b/images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f95a154714ae238060f4e3b3dd797943cc3ee2f95fe2676cf34cdc1b1b4aa2e -size 1472290 +oid sha256:0877ebd12807aadff9e8c88386c149c1ea2cac2bdf36a08f03eb859da2c032c6 +size 1067158 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png index 1687f8367c65344a3d8cfdab39e945165dba9902..b9b6fc2e783600add8bb5486ee2b03cc532023a5 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af5522d78275d73bc4dd35b0b1de337a0621966e618a2990e88ded027ddd5ac0 -size 1459357 +oid sha256:5c01ef8ad083adb2a29277e7b69bcad11957c9b1ab8c8027f867aa842a311600 +size 721021 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png index 984cd58389ad65fea119b2d54cabb3e8b7e50025..5035d338fb4e2f28b6080a95472b76542bea27cf 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fea80c462f25a14b0a06f992bba7843ee156eddeb781e33914811fb03733bc06 -size 1469048 +oid sha256:7f5de01c8547a036e9d16ab93ed48378d508d4ac35ee27e0a102c8526f8f6321 +size 1726191 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png index be3659fb9e609ba85c0073b927eb295449a011fb..e4510fcbe366e803c053d686868e9ad979811991 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9d79c480b654fbd1330cb66180061af91b8f6523252737d3fff259e9fc5f1b5 -size 1354592 +oid sha256:e639f3f504b86fe9d066eb10b4a8b48b04d9438a9f138eb258a62e0e7d1daa24 +size 1272905 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png index 46aa158e8bbba524b4cc49b5cda2f6a43f92535c..5efd05704394fb15ee02b4ba63307505e85396ba 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6190dbc04f20bfad92188ea1911c0c2a63389dc85fdc065971dcf48eee2d4eb1 -size 1058303 +oid sha256:abeefc7b92b5af5447e2df76d98cb19cd78e4ec58b6106ef1e619ef5b3b963b8 +size 1281735 diff --git 
a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png index fd04b01c54075dae6f8c5f928d923d47e70861c4..7224c35332a13e7ddc9adad9a00a4826f36005d3 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69ed5bc712592feb8da5c5e30386484c924a4b0980b9e0b1de11e87bf88d6db2 -size 1356678 +oid sha256:fb6c873e84465f37e4d959dc7a4f1473d7360c00ed120aecfa754a4d88f96f17 +size 1060332 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png index 0c278f45db3f9c1170ea8c92dcf00aa67c8db365..fbdebe2c839f87977d7157f8fbbce35776b6a03c 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3637db17acd132515650b911f771b2efeb9935b948be206226766f69d3f446e4 -size 1353440 +oid sha256:1898b9c492e29a2af60b60dc1db3910052c7e9af070c42433542774393d51392 +size 1594182 diff --git a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png index 2e75868c04ee79dca669c95abd617faec9491691..fea13c060c8ccc6a70e20c4f0a756f181af835f6 100644 --- a/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png +++ b/images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:913f350c8895955ddd26083f210427ba72574afa5bc91c1ecce675cad444bfca -size 931908 +oid sha256:74569d76bfc641fc36fbb391d2e95b536096f63f4c61f144e6dd8d2e56335ba4 +size 1075137 diff --git a/images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png index db8e9b093d5f40749693789f3f3a38efc8aee256..ee05adb027e3ca5337850a5fcc4f8cc06a034086 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b89fc51a3a9d7b880b9168f5f143bc9d667a8f33d5dfc76fa5638c30ca7350cb -size 1165908 +oid sha256:187e62baacd447a3f220fac2e3d029b1d5ab37e7b2acb51efe21f66ec84b7605 +size 1008381 diff --git a/images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png index 77d42154d0b3b7a60fdb68dcd3391495fc380f9b..2c53473e2d0b15504a122bf89622602f57f95cf2 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef218bd37dd6f871dd816f2dd606a96beac3540b9cd78bb20d5872622cd3e453 -size 1494541 +oid sha256:c2e7d713c831e2fcb2c6b4ae015272392162ff3388c86a957a5e7170a74bebce +size 1523441 diff --git 
a/images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png index 784e49bc9bf7e49bfae7eb392a22bca0797e37b8..0926859e736ac926149cd15827677fe2eaf306f2 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:725485b251d7775d1125bd51e9c129f73676a9b6284a283833c30421cfb34ce0 -size 1084026 +oid sha256:02fbc86b8c4548e2dd74db98ff850c8e2f09a1fc1db619689c5ed8b27a882fd6 +size 857741 diff --git a/images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png index fdba6a8fcc6cf7676a5a28dc354adcb4ac728042..a0e64d4c6fef95565f3b5881b0c7c9cceb5a8515 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec4720685e9d4dd984c70f09cbba20fb22d54e98fb1a76199298e51a5606f2fe -size 953241 +oid sha256:75923fda1c2b83a353324223ad6d4cace89571abcd6e9576a0b4b2c99243c39e +size 723067 diff --git a/images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png index f0013bf27e7fad08d0767a3d7636dabcc2a59a61..fc40e83795c573ba07786fcc51b06219c87d06fd 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:085755630bf62fcc03602992bda0edb48a0016528656cb26a860b015aa57e2bf -size 1193749 +oid sha256:7051baec00dae3c1cc0b228290149885b61f32340eeced496cc0e36ca9a3c004 +size 1022696 diff --git a/images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png b/images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png index 9f6a9d90c8ef44bf91ca3bf9e6c01b9fc6567273..611d7f0b0b016cf94187efc2081052b1704196e2 100644 --- a/images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png +++ b/images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f21b328c51d4db6deec752fdc503b73beb47a4ddb3e3a62f8f288a4935d09ff -size 1493299 +oid sha256:1dbe99b39aa799d08af8a253d760be8cd286dd84bf4fb35c8b5d85c0368a5059 +size 1538970 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png index 9d24f6eb9145e3752b671bc39f351693da0d556e..5ec1f5ee157763ff9c88bde165dee6624e2b580b 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f48d2fee70409a654f4ef3ec94420e535adb2e2904c58cd1399f221212a3b7e6 -size 1721225 +oid sha256:a05cf56e3ca085a4c2a272adb21c03826d2892580836b29454c0f24d53078fd3 +size 1635381 diff --git 
a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png index f03e8872e70abf49ebb25c56f3ee26b2026f49d1..ea886232f46a4d8391834802aaad3c656175309a 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e6b706f204850c275b08a37cd3dcf7d0057f918f5b7417f93cbc899959dba60 -size 1314176 +oid sha256:525568c42d14cf11da291de20000210ade93ba5d6bc73c824fd0ac10c206273c +size 1723602 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png index 10717130506aa458783496f22535895cf44027df..9c487411c5635d38361e2d5229c2d5f47626b0df 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1fe899a8dbeeb14194cae40e88fc6b6639b6d144c6b2a63f8c774bb91096f078 -size 1912390 +oid sha256:b69c0d87912dcf1946c14e701e650aa3cd986201aae78506f9418a495827c73f +size 1126417 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png index 3a4c73939f85d0ff8006db21c159894105d2c04d..b256f1ad0639a4aaed9b19877be969892b5ac4e8 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d2bb40fb95d43b5273896a73922eca185f8c3673cf26005b033f25613af611c -size 1171513 +oid sha256:77915dd787c927e8d41380da2e41be58aca502ad79ab8ba06a53fc640c606669 +size 1787872 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png index f011f7a850c5c08f58482fd351d8da5bf1210d3f..8318b82e8257b4be015b1b46f9c2538347ff91ec 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d10727c3fc7607f788738166fd888aa566b4ccae3f7115ad8f1c4b987445a047 -size 1645420 +oid sha256:2ada5537ed3c483690c68f98f6de7dedd3beb4cf32916f90bbac9bd59917a9d7 +size 936176 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png index b4f753c730a64951db080d2408570e2bc31d5808..63fe1c612b27db0f11bd6e89f1053a567454b3d7 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1312d907c1da9e34fc8a857baba7b93698041a4c6fbeb2d1bbe00ed83103557 -size 1132031 +oid sha256:9e02f709f208e4cf67540aa7d19a54180922ecdca28ea63916fbff1030f0e1ff +size 2116437 diff --git 
a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png index 28533aa85f3a678fe0e6c065d2447fcdd2929e19..9789c9094e7030f9fdcfee4bdcd01a42c62c8f99 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b53bda9484b6af7a47ea609bb0cd0d103d4dda9a6695beb9e5bd311bca70141 -size 1572384 +oid sha256:044759d6146a0300042748685cf18432c4a947899dc0b2bfd74fde83848c1095 +size 1593553 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png index cf2e53beb18170d7f0a80e00a1f0aecd233b8f4a..003c36be2dc443e6f71a1f5148cb4377cdc1272b 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:316039079f7636535f9253d51c640640c2df4dd8c88f9b00fa6f12a6b1113db7 -size 3400951 +oid sha256:2e953bb52c4516ff3f2324a3cf27b928b0be01654b368ceabedbe9993ec9bcbd +size 382228 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png index 12b0ad5d58fbcf417df0b984c18a87fe9ebd831a..f3050fa11b70c6ce7174271bd62d459e661b8d3c 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02501f5ca8dd31fe72b955b04572943b75308ea59e9372bf449353422423cefe -size 1432305 +oid sha256:194c0b56dc08a08b7b9c365e15aa0d29994ce7949d7f016f6a0083511346574f +size 760291 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png index 672dce17f20b1e91a78ef7998d0dfd63956e1edc..4e2c6da0c6e530b6eeca1f3c60085476b115bfea 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e7e50fbbdecab99aaa869025214213ca77dde8e01d05390dc27f0ef13d40932 -size 1680946 +oid sha256:a0674993533807d86b2e15f17c9619182cc935182629e5b02e71a9bf8fc02d48 +size 1951108 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png index 118d1d8a75a0ca97b375cf98c0d80c38b2d58041..7c6ad3ebc1f32a64ef3fa95468dec3421774fd35 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:371b53215e17b7372618d1b2718d1fe7f0331d62f359c2254486040efa5c0430 -size 1153777 +oid sha256:d2ab2202015a2ffd310c8738cea3df5474117a415787174d66fd3939d7b55af7 +size 1118885 diff --git 
a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png index c5cf7c64df1c52facd14d098ec8ef81e532e909b..cba54af178e286248360ea412195bed4c3d65dbb 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a127f1fb5eddd295fc73ed4eb747dd143e02f74bc0a0810b9f68005048c14527 -size 1148346 +oid sha256:0cce6f8c9afd30521cfe56a176af9637cdf34c2d04ad717bef8baed6187cc935 +size 728384 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png index d29665d1e51f5f2421ecbc0f2075dbad8a97ac99..9a602479c6e3d183b10e68bbf2a6591b277b7cd5 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23c56d2148739820ba1476582c72298543540126c6a1ab2e1e2370d5de45b099 -size 1239640 +oid sha256:87ac36bfd61f7c45615e206eeb3b9b520519b263bc6ab43c9efdb58cfc48338b +size 763191 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png index 7fe3decfa4ef71c9b2ceaf4300a9c6bad3af002c..3644bc319446ae10eb77984a6a6deb13c14ac24f 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43105d6efca513e53753214fef7364ff53cba82eacdd7bbe926f789070e2cd1b -size 1487072 +oid sha256:13d16a33d71e332d3e9c557db037b1aa2178bd6bda31a76478256f506ea8d64b +size 1835757 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png index 3a2fd5c0751f0b4f631c8d0b563b1a12d7db7851..3464b2964b046011e68aa72c215c076b906ffcec 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4be3411f56e065ca79447eb02d08d67549f702ecdeb84c5436b519141db9ca63 -size 1175321 +oid sha256:808e565ad6254898f58e96b2dc9f5b148f8c4953c5450a10a9036fe4356cd009 +size 1618002 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png index fdaf5692d974796993a1bbaf21cb1418f92ad76d..32263b6e8fb577ca71865cdef21efd49d853bcbc 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbeaf0e3b6a7f5ae1f702e9d488f90aae0cf8d3f79d3d03d46f0748326f2b138 -size 1566515 +oid sha256:79165533053023a027a9c22e4171eaed469a98e3ab4b6950ccb90f3bc201c1fc +size 1385851 diff --git 
a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png index 12b0ad5d58fbcf417df0b984c18a87fe9ebd831a..b61fc3c331ff6d448cbda82ae1cacc4eb2d7d110 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02501f5ca8dd31fe72b955b04572943b75308ea59e9372bf449353422423cefe -size 1432305 +oid sha256:407526017ad6f0074efd03bcea1638ce40c8084717b3d7b795fab1bd48b12ebc +size 269255 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png index 6d7bc0d4667a6d8360dbb34f73982c03e7884940..239d2ae1b8d0222cbc0c7477846f378cfad4d747 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5dcda6df1ec67bb5879ec29682b2e4dc134f1969d71f9aead6d0b53f5d50cdf -size 1498193 +oid sha256:70c46d7234fc2625ef539b7fb923e0ea4f15b278b9a624a6978e6a0c834f3aaa +size 1050580 diff --git a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png index 74dac1388592d9907797d55060f1ba4d1888d4b8..ecf9abd4fa41f394133df01d4af8e47727edc9dc 100644 --- a/images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png +++ b/images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42bceb172eef5c9aae2e14d305400d4e538db2511b9736f6148759feff979184 -size 1314446 +oid sha256:d148dab7b64486f205404c73c49862f7f9f184d609befcd9f9a5317735b5dc98 +size 1342814 diff --git a/images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png b/images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png index 5818d0e160f59a7173a1d68df056c9b87e1efca3..46c5b906e56d05657447c365a3702981b319687c 100644 --- a/images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png +++ b/images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4b66733798d31614fe21a5885456dad429b93b5dfdaef63410bed9f2d2f8a61 -size 1325879 +oid sha256:04860ac6ddeba4f423c1072c00eb50278b2290725d31269231c64cec9cc122df +size 959593 diff --git a/images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png b/images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png index 4e38c337a009b5cb7b8e50692f40fcdfd0717c07..d00b20172216c32a2ea6bc09e00ad388de408639 100644 --- a/images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png +++ b/images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba06f4c104600c59ac34a08a0f611ec275f1bd760323b0647be3b563c050f43c -size 1218613 +oid sha256:50fd8a08acb7d4dcdc60b3c388bf2ea54948766ebf68c78ca1725f44289d50aa +size 1934424 diff --git 
a/images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png b/images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png index ae20b11388e6fea95a6dd8e5143823eb26018356..a404af8572321e4a2fc4673fe9ab4951df9e73e5 100644 --- a/images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png +++ b/images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e7b93b5d5960841c714f0c20866e76f88b922afd11c2008c23c6ef5c0d97138 -size 1875667 +oid sha256:b67941c50606b77242a7ac6435ffdccb3bedf1b24a60a21eca91be4c2405602d +size 1870344 diff --git a/images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png b/images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png index b47bfbc18862980b0d94851366907a4d8ca450c0..00bb569fae3ec40a6f6aea80b3ffba14f1859b29 100644 --- a/images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png +++ b/images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89de6de9b9de1e6a8a712c6ad6a9a9d3c32d01822bb9b43e951d40dc56bb35a3 -size 1181413 +oid sha256:e2eb1c993c71cdb265b7515e5be93d1d577ec70d5b45827c912b243905d75c9c +size 1441554 diff --git a/images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png b/images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png index ef592d33d91badc504f425b29d9601a8be8bec7b..bb758b040b35c386920cb7a3b1e6df84cb4ace75 100644 --- a/images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png +++ b/images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1da96639b42ebeb6207aff981b7fa2c63df82673d1f42f3f50a2de9d2e55f5d2 -size 1625743 +oid sha256:39601036041d40e3516d89f0b43849f3a570a06fd3350a82ab7bad12b5e80900 +size 881072 diff --git a/images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png b/images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png index 71143de607c49fbbcf342f955707808fd569a5ba..573f84d9345ac360613bce9401c7d6788419a190 100644 --- a/images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png +++ b/images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8566075e816f76fec8cdf0719958c80ad666b0aacb4a69b92e8385a21117b3f -size 1529338 +oid sha256:42dacd27ef7f5940e355a116c44bd9d1dd14c554937fdb7bc3b7ec20daf474da +size 1295958 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png index 8057ebd6d9c7a92be0ed880ea2c930198fd7a377..46fa4e2722b9b56d17f0d1804ea810543ed75cfd 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02629bf6b20d95dbaac22cd847a2a4daedad36ee68a2892ec7dc2ec1f769a598 -size 1436328 +oid sha256:8c972876de13b902e0d65dba271d893f3e453d8aa544792fa0a45cc38fc351a5 +size 1316128 diff --git 
a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png index 073ae601233b58c535510db388c5b4981fcd61e6..950f22aa582c8769d2169676cc2aaf5acb3ccaa9 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:396d49079642603753fe340045db3c6227100c87674f68fcbfb085183d55bba3 -size 1913943 +oid sha256:ab56ee5498793aeebb5cd98db7493fc2965e236269bf7b5924bcf4ab1e458cab +size 1320153 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png index 6fe285b668ab7eed0c40ad66835fb8beb1708c1b..0b3bd9db6dd98f258520af098ae30768a8fb6588 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd2cefb3ff4deada5a2bc1057a8790e4b2ebf5c68ccf4f19bc858297a84ce1e6 -size 1136396 +oid sha256:8f68501e2cee59c46ded9f89848b87fb08167c799ad1155bcd9e74c788dcc393 +size 544926 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png index 83d6226b698baed2d0d6ad63eef6f2cc5ab2627c..31d81fd3da4eb7fc309d7b68495cfbbf8651c33a 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7706ec467cc61bfce70ebf2bff8f4b4b1cb0e5c2ab8a6ce3b55de679052a6bc -size 1641520 +oid sha256:ff59b76dac2956e8638c8bff766c81032c71233ae7ea42abff3a3b2441d67d57 +size 1593811 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png index 444bdbfbd8bebd19e491299f41e043bac84f46df..ef8e6e209b50de49bbeff61c0baedd42eb894bc3 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f106d3b3f152f8bc831c0a046250c88f620bf4ead448e8eee1a2d9e5e27e166a -size 1244812 +oid sha256:383fa1de1badb929cd29a4c3645ba7cd5554a452cdbd61caa332a754462317b9 +size 1085816 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png index 8fd6703820d2dc09e14a26a67af7d8db54743c5e..1bf2932edd6237fd1a2bb406599bc84be6424547 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36af791a8df38d519312ce98fdb62122e44d7aacf3f54b1d331777b3d0f993a7 -size 1670333 +oid sha256:f391e22e5f8fda26f5c9947f460be8a1b2836c9b3d8568ccbaf39a4702a141b1 +size 1884330 diff --git 
a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png index cf11b0c2169cb83e7d1552a0322f0a17799be820..1d43d81050cd427d5771ef4ccec7e9cad68f0c43 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:135b66496939ac47c219f151270e05ffc4f6260cd5fba3b7d340c5e6226c02d8 -size 1660649 +oid sha256:36abb78736e8a4a5048dd711e5fa8e9b59656fff676d34efa7e087bc3a535ec0 +size 1252817 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png index 502512f4ef0d53e7e6663e776e08de109436a79e..137c310e7f958b226f163fe02e51962aa02b782b 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a413c7ff48a51b8672b8121e4e56cc19c4de70c8e329be1c198e11c9f73555f4 -size 1199686 +oid sha256:66b2f5cb6fec81fa1ac0ca795392ded1a8890c7b610626a257d3e0041cd65341 +size 925609 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png index ac667ec5953c5c63eaa2574e2c93f4561f515baa..10c746ef947a3e5e3d6f985c56c69545c144247f 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49f4a49ef555c8163382e4f9fb1bc041697a2faf3b580b0721c3dda4e4d93d64 -size 1196269 +oid sha256:2b2f422d5e4a2db37445a2caf2082d59485e6832f924727c0bd7c65238fb6db6 +size 1125308 diff --git a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png index 4d228c8f9e53d6a6b880157f26f37457dd5654b8..a8ac950140f4f9c2e1967a552aa89cc3b9cf13f1 100644 --- a/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png +++ b/images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f3c7bb1c600bbb58d3e4fb94bbf4f3b23e452ff12f7d5b13c55e3203b05b945 -size 1072466 +oid sha256:c1be6b8e3ceb29c10498dbbf815fa108152d63cd4b4609750c9d80c9f903cf99 +size 1301362 diff --git a/images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png b/images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png index 2a647f9e9afd163bbd66b182b560050312e5139d..12b7a3b1dcebc61eb3a16da838da4015fef61cc2 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4b1b27ab0f0beede190ef917ff794aa1c16efb399be50471350fbd7e2d23808 -size 269702 +oid sha256:7ce500c9264a012b4a426a5ab79b638909fe7146a849fd0224f8e92615324b83 +size 555167 diff --git 
a/images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png b/images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png index 2d868ebee3a0e93cbd9189288c13bec69d95a0e9..00e7ca282c885c19b633a882d8ed1ad5442c1f34 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:871e4ddf085a16699b7fff1fd736bf0523dbc459cd13d02c9bc2a5faa13cbc19 -size 239802 +oid sha256:486f7d1b23ac6f66253c907514c6600ee6830cf38aa5ef34f344237db84f210e +size 122678 diff --git a/images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png b/images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png index 7edce6baf2dad4032f9241a40c6ed72f1d5dade2..5570d584db4d5129e40ec88a7e1f2009537ee37b 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d43ccb3bd871224e4b36518f900d2c22be6b0d9d8624db3e303a3bcaa54d9a99 -size 287956 +oid sha256:85d2bc9976f218f6e0978bd75a6a96fcd56df728ad943280c59c024c28776a43 +size 356372 diff --git a/images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png b/images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png index 0ec20932d3e672a7f24c26b91486a35d7ce07624..edeb555c282d524b31b85a8484031e2b86f8213e 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5d31aea6cc54f88f7dd74e24b313d923d3272bf0798f350b60cd5f93153333b -size 1075065 +oid sha256:599e8d0d4db19112030a741e00a496d92682a1cd72f1913257e28294e6fd60ee +size 830269 diff --git a/images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png b/images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png index f21a409b4633097235a0dcb8d3094239dde4b372..6b56eea2d55a0b42796a6d5ff68926ec14859f25 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9a31f12057a824f09c9bc174a8940619127c412ee5c449b62a358bbec8eb2f5 -size 1342792 +oid sha256:1a5149e0d20a4b39edbaaf698625104d70a0bb52b1d78b86dbd3f70683c06392 +size 1364406 diff --git a/images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png b/images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png index 4c254981b97131e86ccc7a611976e35e1710cc7c..31663bff7edaa3ddd81e3f9a2f02843c7c31378b 100644 --- a/images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png +++ b/images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02534cc56f9dd9f398e7383876c9b160fbeeac2202867b8eb16617f7f2e56a82 -size 1274166 +oid sha256:7b746e55a47f48ea19366f4327df9ce25dd8884c3071a942a7641a40bd933f0e +size 1776278 diff --git 
a/images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png b/images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png index 803b52e4f87fb6711480dfbd21c7e4b7b4dd5e12..8bc16147a8df398397c74cc8abe2739315e93cad 100644 --- a/images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png +++ b/images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:626b6fb31248b3597d768ded37575fb532feb05d50511be65ae6bcde3b237dd6 -size 1213938 +oid sha256:f3ebfb0dad96aa7b4bf5af77d21f2e8662687f1cab081bf11b75abaa279c2d45 +size 1700876 diff --git a/images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png b/images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png index bb17a7345c4b1118c2e87a3a50227d6ed9511cde..e2ddf8f41ddc225c8f60ed69d1dc715b3a71ec18 100644 --- a/images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png +++ b/images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c3e8809bd12ce5ab51ebe1d8e4a4246b4214271d0d3f573da52006462f69e33 -size 241196 +oid sha256:62c9971415a65d0be81a7af1918e88193a595f3995915e184b620b901deeaaa9 +size 448523 diff --git a/images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png b/images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png index 8f0d38e2bb3997a9e028635e7b5cb2cf91b78447..4112a38ca2671841d480e78421d5aa4fe58d4ce1 100644 --- a/images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png +++ b/images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5379e54efeb819d50c05c299c27bbb508ec04e36ccfbc1e07acd6b4a26d97daa -size 651712 +oid sha256:c06463e330dafdb6def1af18ba74cb9bfd91b3af244467c70960e850f08d8521 +size 1160022 diff --git a/images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png b/images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png index 9dde9d94000c3dd7b4bd914704bce46ff286bdad..02d861e636a95fb0ee9b3926d855b26f0a13acd6 100644 --- a/images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png +++ b/images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60d35a3baee3a852e3eae3a31012b983138ae25dc71cdba16c9cd8c6868d498c -size 493879 +oid sha256:22952794ec33aac0e345ad6f7a1f60d6063ba17136aa517fb1dce46b7b1866ac +size 753668 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png index 2fb6972c98ebfed5d9e8c97bbfd111418d715932..28548bf20b78e960e7105869f3147d730aaf7668 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4612708655bb4dc54afd4a06675f369f13b60d4f495b7482a0e67599999b8699 -size 1196316 +oid sha256:8daea3c1a7a2e7319b196cc32bdaecc954c09c3c54ac9c0488bf5fab2922f22d +size 743288 diff --git 
a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png index 51ffbdeee2dc4eb9606dfeac66a47451686dffbe..1deaefbc5d14c1f8d2052c2a2e2e486f5cd374de 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f649bdddad7bc45a0ebe392fe7c737272a83022f13d3dcfdea65cff6733dc11c -size 939609 +oid sha256:621f4eedfabcfeb01f094bb42495ba19901c25f209fd79599dc5a6408f4e7cb6 +size 753094 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png index f8c8d91bafc5b8096d64b4d7f323ecf259a853a7..dac6cd5b2432199a75f263ba64c6eaa2ef680756 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de09e71886d0c4d6824444ce466cf42f8eb543726d631dd52bef741775cf892a -size 1537862 +oid sha256:1f073e10e1cda6024ce78340d3501ae57d2ed0d67b2e0755f51938b66061c78d +size 1254641 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png index c50baa6643717a9e680c51218637cdada1360119..e543e8d169ada55a5a1b0e0f2a63f9cbef1f6dd5 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a0e08603843eb01a0e0c9e0d724a09e745699ce0693a76e0678bf80600e4c42 -size 549761 +oid sha256:d0f72239e6c8c00047420d68a3e06d523f6a29311cb82220ee7c7a80fa17ab04 +size 542391 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png index 4e3ef609b9e666c7bc55d3d7237d74b495b067be..79d1a71d0b867a45f5ab2d02a5ab19551b3caf31 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa948c2666975dce8264d4dc1571c211f1d77140cb804dde224856adf1f50920 -size 1299789 +oid sha256:92d5338aa192304e07b9f5558e11ffde0c007ba95e79fae358f254097671a766 +size 795940 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png index d9132839c9b411f5f021fb66ef8312c4d662cbed..7b73e7d4a660407a40fe648b39b55aea804eea31 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6cecacb92d7cc00bd66d95b1cba8bd9dd6f882df709b1c39a416a6bf1b833ef -size 1896339 +oid sha256:e6441993d81b08c08de4cd152bd580d80b72ea11312cba39df5a5abb1d6c72d0 +size 1810389 diff --git 
a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png index e0555c360488677bec6e6546fb0f5465dc77140f..3e2c27dad3d662bb18e0769b2c863d3d7002d40a 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11b162ba938fd0ae7f26cc58e5ab1fb8a4798d1a17db9a2e55ebf44acd00de1c -size 1299702 +oid sha256:3ae4c5710737a5a1f9d28034c1ebf6599e3f1f1706b2581bc4b23ff85170a3b0 +size 1227042 diff --git a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png index 851d7764e7b62605dbf531d53f05148265827bf0..ce83c831b0b893b2ce3cf20c3c61d37acdf33acd 100644 --- a/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png +++ b/images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bc51978535ca448bfa3bb0acb0391ba9fb6b19cf317031e09e4c2ec18eeea78 -size 1164414 +oid sha256:7af69de77817711fd31fefc463d3e311395ab3e2d6694f05856a2554f81ba145 +size 672826 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png index 1ec051c150f7fe4b4bc03c187f56702bd84bcb47..b225312c7ea1be681ca05ac1429045d72bdc1f8c 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d654780a4c8005130b1e94668f9f27bf61166289e8bf4de78ea01f2b52add2fd -size 1197413 +oid sha256:b7327578ccd49ad984e7f02b25c3c728a2cd254d40a9ba3de568600bb2cd6485 +size 1439865 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png index 5f27142ebf10a502c276a3af4851761057b3a2bc..e95d331f95848328b82d12b14c7a2a52ee6a1a7f 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aab7018b0dcbb9587e97d34021b07ea3c592b3b3ed2c224ee327d18ab47ee534 -size 1350829 +oid sha256:4489a9a6d6f850be657cee3da8b1c4753395f1782d23383338e257252d8a8b91 +size 1469208 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png index 737f9fc5b8b5e16f75a5485d0665fd8752f08082..c656ec8d1cc383581630f10b8eb56921f5b2cddc 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1cfec8a438a6b8010851f60d3e7c7b1ae3e3c91ec58cf5aceda49f7416e1ff00 -size 1523458 +oid sha256:563f77a6cbccec0b493bf0f458b8cdafbe2e3a5237566330876c099c44143cd3 +size 1306390 diff --git 
a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png index 87e0a16d716df6fbd9a7c83e5f862f937db3ca1e..26aa16b3ac979117e5e9650211a0c05ab45d6bfd 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ec6d7a73c37da40cae82ac146b22967ca69bfd367ea5f85f7fa22d280ae06ea -size 1051653 +oid sha256:461e05e108af219c7cfea72eec97e7a729ac426cce18f876dc560b7b35604407 +size 1055751 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png index c9c4160468c7d0cd96dfb33b640b4523650ec27b..0804ded1a5488a17c898b3125a8f0839eb3dae7d 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08057b8f73620a3accd2c51d3ef9242ed4700d063e315b4265b96892ded93aa2 -size 712140 +oid sha256:d07d998a7ec80a7d07b5029988947fb8f06a0d0a5b93d29c56fee5870676bf51 +size 357154 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png index ed626163cc26ba9c00b03541c6f6bab962c7bb33..738ff1ab966c913b10f86660c7d7b8a47a3c0ca2 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9a9b38c777973c0a9c886699089e64cbfcfc9fd8c3666d59647fde5ebd7bd2e -size 1465078 +oid sha256:fae2a6b28a6b1496a384a24e54640cacf696c7d225ba169786ed90638a9497b0 +size 840274 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png index 17d4f36f83d086c375b704c62c34486d3a0444dc..f04f6fa745936a7ed94f8992abb8d1c69f639098 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67d660caac6320f850fe09b071d585e4bcf51d2857d182f1659abbb154936a1c -size 1138551 +oid sha256:0d073aa72feef2e924758cf988f3c127a20db0779d60886ccde1f074e89fa962 +size 1527962 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png index 241c89643a0bca5ec81f27f09a2e2a533853b11f..9e074958429ec4776b9ca3b7bed8c61a7b30df5a 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:183350f619f8c868cde6f6da6444e9d4082987f4ee9b2204ae152c120089dd51 -size 1208594 +oid sha256:4a6b0a99c060a18abd58c8514ea87e0774ada345e688ae968cc3d890e835718b +size 1105690 diff --git 
a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png index 8086c27460ece3f9bafc2d80bdab918fe6e0fec5..8f0e37583ad49d3dfdf34d982a3a51c9449378be 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4b907cab4e0781217e036a8e74d401cc806842e2e2396e3589997afeba93b5a -size 1340831 +oid sha256:1835b71b79d2ebba18d2678dff272d01b035d3a89e87fd23b34641ee9a6c0439 +size 1133823 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png index d934ae29decc8b198f811f3cf4e4d78948905895..87e69b1520be166845674c4548096f19ba190f18 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cba1aaf88f76da781b3b8e1cffe921e317f7922bdef20baedf799bdc846f2bd7 -size 1476352 +oid sha256:9bd10d11288fd597421e2ea43c112569fe8ad0db0f45a59aea88b418eab4bd81 +size 1319170 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png index 5824789414237ad00e0c0c279888b86d70c5d39a..04dcd50215676bc67d5c426ef2a7a05f8eb48459 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d615b1ff3f359da3885763816344c09689cb8481cd77d30edda0dbc15f492f2 -size 1321053 +oid sha256:64192c938cdbfc989eb104891ec770bbbc7b7386ae7dbdb4bfa068c8ec866a39 +size 1528009 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png index ed72a6b0d5ed24afad25364e662116dc0456b839..ba61f7cbda24804ea7a61c13809e41f6a318fba8 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:844f0e15b7e96a2e58a9d43bfde4805834f666451d37798163064a8a1838ac81 -size 1285407 +oid sha256:fc682b0e7008fb59e5c41c947724180a08569fd86f836621d2359df4c95f8f61 +size 1027437 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png index f4a2d466695ff2834fafb13a9f9bbd765efe32c7..04dd75c1bfca7a744cde53c44becd07fb6b5b4c7 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:807461a7a2c422592d7c6d04014e39a6936065072159e5512e1ef7ec3adf83a5 -size 773918 +oid sha256:991a6b86702f932106249553d965122750e736028dc9cb4c07498a14fced0f76 +size 893286 diff --git 
a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png index 944c8fd0fe7a140a2b35b15b980f08fe7dc1b5d1..34cae5691e52023b6b60d80130913f6f2bbdbbd3 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:055a2c1eb1b99718a241f7fb6b628c12c6192287dedfdafa725a5aff1402d2a4 -size 1153600 +oid sha256:97e37de0ebbeceab15740e0f4fc654a32a4371c8097f7de3ae3807ee6a83347b +size 1630054 diff --git a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png index 531abc18e2527cc2536507b0565616690d4b0b71..a181bf6ef0dfec24dd516536f2605b4dc3f018fe 100644 --- a/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png +++ b/images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f453acc77703f0db8bc763fd8403c788bd7f306540ed6fc4f57d9c6ccede97a -size 1145298 +oid sha256:ab3bc7e9e68e939b74e459eb48b191aeef12c69606fc2e6d8b1228e3c3b99b09 +size 1442123 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png index 708cf48e26b27ccf89a4f7e5661cda189ac51437..2ee3f477ee43b32c1b42788b4c643fa3508e485a 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4a7c432f612ca959829a48cd64e7213f2496155ea4f27f208c016a101326cd9 -size 4167847 +oid sha256:b9c6eae3200b2dccf6c25f4e4595c93a683a4315b6371495a67ba83f9d0ff39a +size 420429 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png index 087a4d2c8f0047e59b9fd0f855a3644b19643862..a9fc1a3656ac9d9234a5e12401c64c2ca582cfec 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d4c691ecb9f7a9719ee4d4d2bd40ea0fbb4a88a70edae75347ac1735b016582 -size 768447 +oid sha256:40be197a38f09b298af4258d46df25a686697117ebb68e3b6459a082b8d2520c +size 482372 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png index 1be5a04a7022a528d4f6552d93363f69dc711166..515ad13b00a68bf8fa9772e2ca42cbd2341b64af 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1fbabe27f103fda278f217422f80cb6c173c5c05fb5253081d14ff2f21ee332 -size 794313 +oid sha256:8b1085261e0ec41f4aa0a7b357ded08249f1a3003d84cf91330d0ec2990d0f7b +size 849980 diff --git 
a/images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png index 6832b92da0b1038f98a38e6c82ef2b282ce7e41d..ad3ed6014992110eeb07ab7640bd0855725e420e 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acf08d5ed9269cc7b1419618808bda52a06b94fbce88ea789b849d88b97dbce2 -size 791154 +oid sha256:735467352a99ff0883a6c20fb1bb60c36784182595331da96ace6d4e577166fd +size 723141 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png index ca061eb41f42b323ce58272c529c3d816e88eb23..1d8497f076acd9851f4ba68ee1d014a053d4d35c 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a023aafc5494fa5d0f0eea7e42778833ce007d5a394f0a7b1c1057649ddc7cc1 -size 876873 +oid sha256:5b12bf15bb164788ea2284a7ac707dd0953862a0510864476e5bb58ec02496af +size 709589 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png index 328b72229a1ed9a26cf8b32574d0207df6289ec2..127dafa2416644a3b7a9e329bd2e567141478718 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1cff5ba08a4b2875dcdcb536110ca0d1cc88a3f0c3779264bf308d79cbab97c -size 756971 +oid sha256:3f9f8439ec7874cc17d35cb206fe27220692ae1e834bfa88c18abfe0bec3bccc +size 602522 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png index 19b9222d440fb88bf77180604b4f052f1597dd81..0f0e27511321f0b534ace1027213f1bafd2c748e 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86f509b7d0f7f9a24eae62525a618a7382c91fd76aee1407d31c874ae0b60876 -size 754848 +oid sha256:f62493e6a9599f0b41464ae72a3e84d16d9a8d5cfa6a41a83fb05358e45d4b4b +size 706624 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png index 0cd102e07b20cde7e628556b1ea20dbb12432b1b..9c476c99a8a1be2604b007e5c26e453b33f3e36e 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6177bd494ba1d114d8b1de380f16d6f6a52313f0bca2e1ad458a4507657cd976 -size 790748 +oid sha256:93c7bbe9a5968e0b51a69b4991ce6d4d1794ac15b8273a486b7ba5b6385193ef +size 986176 diff --git 
a/images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png index 2f23c32411f5d15ed6ea5d30be97092525580635..a748f4a773ede0dae1916df242eb1ee4f04ddc8d 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2424e21f53c96bbc5c794920fb4c5c5ceefbb0f653c8afa88b315a58b265bbdd -size 784363 +oid sha256:94c22c19899b014e4e8b67d7711471075c88f4c02e71400ffcb1d3208a6f8468 +size 968690 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png index 57bfea2d566c648159e9212c18692f4f1bcfd20e..eb792bffd949c9ce02e9ebf2678fa370e652ef05 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5dc94e44ed494405cf286bcf00b0ab09b47239463d2b5a44f29be302e58d6319 -size 813265 +oid sha256:97d73e72c56a4c4031a96a128ad37afeb2ea5001b2b3c4cdb9726135ba1cda01 +size 697650 diff --git a/images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png b/images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png index 5ad0c6fef79354e3b06042f1f1b39bb34f09b99c..fe088ee9ac0885f38cc77991f80373febb42598a 100644 --- a/images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png +++ b/images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d888f836856f9ec14a0b878bce4eb0100d906aedcb25dec7d619d0588b441084 -size 780967 +oid sha256:a937a4a475c7dee3fafaa8a40d6703486c63345a07dae3ff435b11d1f9a4f390 +size 740548 diff --git a/images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png b/images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png index 01c90194b67d3d188207888befc6c37f275e7b33..bf0f3e73ca2da9202675092d11f0a9d0186117a1 100644 --- a/images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png +++ b/images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6610937b4228fe468b39332150ec2f2306bb62c0be497011efef0b0101f7f360 -size 938024 +oid sha256:b5c1cf3686a53fe69caac6ffbb309ebf9875e122c61ee431a7cf634b976dd43f +size 919576 diff --git a/images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png b/images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png index 308e6a6d0b60f04b8327208a06053d67cfb208ff..73a29ad9f3ff8ac0a598a59bdd72916a576b00c5 100644 --- a/images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png +++ b/images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a09edd7bdfea12a31a76b77cb324057dacf28c22caabebd92eaa4bfa40df9769 -size 988540 +oid sha256:fa9680d2d947e0832ab31ce484233ee427329dcbddbf0ae978d9da1426ab851b +size 1072560 diff --git 
a/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png b/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png index bff893efcf100492d96c5911acc66f60191445e0..99971caae30f5b6384da447849dae33e34253f2f 100644 --- a/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png +++ b/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:869bcd94fb59272daa34503b1d42f6a72ff63f2845d84f294187e1513100bbc0 -size 1231155 +oid sha256:451a1337984e1b00494bca41a0c87058f3b164abaa17dfd4e1200d237dba0995 +size 1004054 diff --git a/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png b/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png index 522d059023fb0e16080a730cb3a8f2adb4aec94c..56662b319cd7475bbf7415aa488647734ff05065 100644 --- a/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png +++ b/images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85ddf7645a12e55bbee8bbb2a9f62cec1ed93999c4f0ce7b5ce822cc95040a8d -size 835925 +oid sha256:90e6fcf2e56c23b475ff57b66f2f2087ee5414f69158d6f238abd62059747376 +size 1308565 diff --git a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png index ef225685563fbd085d0fb2e74a5d8c48437d789a..db08ef33e15b184bfeb858c47adff7691d9856c1 100644 --- a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png +++ b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4976d97c4d93858ae4b9626c244f2eb27780cb7a1311e87c10febed9a0023af5 -size 3640606 +oid sha256:8c5830f956cc25158ea507f08a1c531a7c2a5f82db1b52e5a92a4de0054105ee +size 746539 diff --git a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png index 615f3e02aaaa6b747ad08012ecfc293877b69ce9..130e99c91213890060d97b3bf0815ffd8c39f2c4 100644 --- a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png +++ b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e37960cae185559f950ad6fe3bbc7e0f1f228a05abc58784316a67d8f23d072 -size 2207858 +oid sha256:f0da0a0a8a6ff206badf8d15d1b2ff4dc8398f0943cfc1df7282647b031bddae +size 2200263 diff --git a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png index 4d8c298bccb84d3d301f3def5e3fe72fc1c4f3ef..ef3af45e142a89f0a95eb6d837790f71e231916f 100644 --- a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png +++ b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5dac02136625ba169f3a9f25a11cda7071dd187953365de72a86efa596afed31 -size 1810751 +oid sha256:cb3065e57810041502321402a6cecff9c3f86eefa85187f69baae82ba1e6b8ee +size 811687 diff --git 
a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png index ffdb9b811be41faf59f8a6eb8d2952dd8bccb88c..24cb05fd71242432d98b6910739c8575c5878b2b 100644 --- a/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png +++ b/images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72a3e917c62ef3a87fd54a2b833399dac7b4ca505015f6656c1392399d8d25e0 -size 1703774 +oid sha256:35388cb886d31d1173719f0d8f1eeaf8ffe326135c0d8e7d1fa960d824e1e213 +size 1688095 diff --git a/images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png index 07c7cfd7e34985b87f3a2c45f51f2a848951a0e3..e86b7013b41d1874f0f359f2f160d433c30cbcff 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd5e52444c0caa2b763fce3bd8f73030743fe708e7bbaacbbc4ff283140ae770 -size 1363997 +oid sha256:4731eb4f8621b4a3edb0af558ba0a55b48f09b199bdd9afd1cef6ceff4797335 +size 1384649 diff --git a/images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png index 6c8dcfd8279099251c6b4c13eb08c29dccc70846..96d0ba6209894dcb41062f6f5d8ef219194c247a 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d00409fdaa6fdd4807073ec74d65aadcb4866e0298cb69cc8629d441c15a2b0 -size 1441007 +oid sha256:09f55cb5dbbaba62b3079a26afaccc2d2955562c6bf894f71f1d6f921752c166 +size 1446862 diff --git a/images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png index 07c7cfd7e34985b87f3a2c45f51f2a848951a0e3..acccc01de3a51c24c9c0e476d86ee839341194ff 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd5e52444c0caa2b763fce3bd8f73030743fe708e7bbaacbbc4ff283140ae770 -size 1363997 +oid sha256:fbb49e165be38966d43b9bf1172724cd374d42899d9bd09a60af73df235e7e7e +size 1367835 diff --git a/images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png index 674d2fad529a783a1b207c119f982ac05e735f5b..6212f5142478ec34175689d12d739577dac04082 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:856677d5be5c0575349730a3fefe5ccad1d410ee5d874bf462d103159cb8244f -size 1669538 +oid sha256:3304e5861901d73d9e551f84158829efd96c6ed7bf47fcbc9f08612d24fa1175 +size 1089907 diff --git 
a/images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png index d5a816b853837d3465197ad62f641a3f42985ba6..8c46b170c6b4103a32f1817850762f94a425cea9 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a56b47de311b5f4609a9df2155f36c2767541c20c5391db7e715925d51438c19 -size 903335 +oid sha256:3df8fcfa8a2dfdafd0a1b0d7569c04690df78e432177dae8f8220853d19d2fa7 +size 924784 diff --git a/images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png b/images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png index c3e37a7601196c89b16438acebd123d90d5c392b..392fd8a2e4fe48cfedbc81236a1905308abe2c74 100644 --- a/images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png +++ b/images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08e22ebe384bdca58f5e86cc26198bd47464715e8b2529d628dec8afe2e6e7ad -size 1400231 +oid sha256:7eaa9cea194d92f40b1d23446199070fe96a289e465e6e47196090325b5e9a8f +size 1618929 diff --git a/images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png b/images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png index b1ab8a5132c4695a8482a70606cf22874c8efea9..53aa434013123cbdd49d32f3cad25fb9560b60eb 100644 --- a/images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png +++ b/images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a640b2be91db1bb8b7592a55c0f2a3abb08051a405b5a576a91d3a18117e430d -size 336988 +oid sha256:b602ef5978fc581722c5d85d9ace28d63a8afa09698b1a5d929f523b74626939 +size 712857 diff --git a/images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png b/images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png index d6ae5eea693280f0e3c00fc624bc2cac8b434b2e..1e5b81f79f22189b10137b603ea55c41201519e6 100644 --- a/images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png +++ b/images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26771b912574c203e6357e3dbab97e59f27fbc0ce8e624744fef7fe68834cf3e -size 979271 +oid sha256:c2aecda51446a0958f57af083e5d174eb7de4835915b7194382800fb9c76c4a4 +size 1476894 diff --git a/images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png b/images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png index b868b387bc1ea921cf2efd5c3a633f501a49d672..0c96c09c3324f41f93957e9f84bf23b7b7291391 100644 --- a/images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png +++ b/images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2ca20efe4499dbe1861cf401f2b2210e1ff086e6ac79399f4d3bf0f366aa302 -size 1620209 +oid sha256:156cee354e0a5bdb4580e4b6e6ba34e4a4236af2408aa2e3481784ed03b10456 +size 1132380 diff --git 
a/images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png b/images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png index bd213a8e84d0fa3cf21037c08202dae8e75d61d6..a72ac98e50d69be4cb362c5c4b57dfebe9d15bf8 100644 --- a/images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png +++ b/images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c979156af71b29da97b7a40e4530c4f0105a0fbf2530b58b340c84b54cefd9d7 -size 329533 +oid sha256:70fa162b0f9045d3cf33b8e119a98891defa67475ae6f15fc223dbda609d109f +size 336086 diff --git a/images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png b/images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png index 6f9b2441763d14e5709b89b3eefc9eb42eb01925..90524ff2f153e32a3e43f9c61bc5656d915344bc 100644 --- a/images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png +++ b/images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66c19a15b29ffd3fb20ac8ca9dcfbef7eeda0f72bb1083894a33075a6bbf855a -size 430565 +oid sha256:9b5a21fd22f8f56ebfde0e619bc889466914b202f376a80ff43d82163453033e +size 511639 diff --git a/images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png b/images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png index c3e0678c2ea0b5eccc026785d878d71c05e60526..a31afbe0b039ee09b1ac1ce4e47e6133405d5d54 100644 --- a/images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png +++ b/images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72364bfe7f10774cdaf39396c8ccb66cf136bba5c0c45ad1e6fa18b17532893e -size 430042 +oid sha256:7b01f6c29dbe6a80f6464022e72363acf6dc5bcb9fc66fec3352814101e4ae9c +size 590092 diff --git a/images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png b/images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png index 563c9c7d6479cd957d8b54537002c2e4f9206460..40586a31dbc99039b3f1f8ec560b02991ce8c525 100644 --- a/images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png +++ b/images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81af98ee8c56800aebad555d298ab65082f091ef24df83da7c5e599b82423edd -size 901050 +oid sha256:104cdd285899bd75adfb9d3dcb3804cd9f9aa16bbe953d5aa0372c91417a0f92 +size 2321375 diff --git a/images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png b/images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png index d11dccba70e46389cf3080ce1eae2536cfb4bdf5..a5fc161aa5a6bd86bce755db04110cc8173ac083 100644 --- a/images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png +++ b/images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01ca81f9fada9b6faa90024831d5b743e06e1a694a035dafb9d2670abfe29225 -size 425626 +oid sha256:6c011e3e61deca1e88feefbede4f3e3e0c0f2a1117816ac03199b26dff9200ad +size 454962 diff --git 
a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png index aeac7fbb4449c82a595c5685b4ea59afadd22418..9ecdb6bae5496e6fdf29d8d19a20012257fbefc4 100644 --- a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png +++ b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51cc11a9a49c7da1e3149c72d7037e79ccaf271ecc1903c7076b8d628ffb0823 -size 641847 +oid sha256:56dab2e4dea65a1b58ac468eccfc541fad24e08bb81acf13ba65043afa8ee3b9 +size 771549 diff --git a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png index a278f27805070a7eb16207e45777d14af3efe32c..db60f6328db69dc22ce9afa13efa3b49308faffc 100644 --- a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png +++ b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0ec42e80f01a939b9dd997207c02a07a4f0627e9152f80227fe06a01bf6875f -size 809119 +oid sha256:24c96d5e41b49a38df6f2f7dc7b103a0dcd52fb96d58b2395f5d84e4e15f0697 +size 670460 diff --git a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png index 4521605ebbdab48a6e426f4a3bef4ca0baf017bc..fb82e9711f7f122ff2327a44adee520119b6c2d5 100644 --- a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png +++ b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea1a9995f658d11e245959205aed96cf07bbfd385e00d93414d62adc5f7c36db -size 554539 +oid sha256:e72db24893ef76caa0a9f7c74644eb080bbfd6b31bae43f239facd35fa5ad501 +size 511687 diff --git a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png index 64b85188a13a1ee3321a99de0f28d5936a300048..42654b1cee165e01f1e97b9765efa5654e216f54 100644 --- a/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png +++ b/images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e08dafc526645149752b6882cdcd12bac4a7765bc7edca65defc54766c9ae433 -size 536919 +oid sha256:5ee97b0e18ba729488c9efdac231a86b2dc74df15e4d19baf9b8ecfab1223b1d +size 555319 diff --git a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png index 3da144cb6e04fe6fd3ba80c437ec1906b8063e36..f27f59f8643f0adee72b3a212a4acd47bd67d1a8 100644 --- a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png +++ b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50f234bc54523d25cac7ad9b361cea83684ecba463fba346a4bb29ded6bc2be6 -size 1065874 +oid sha256:cc96c0fabf615860883cfec07e30d1971c522baf57de775858a278ac7d9e41c6 +size 2174148 diff --git 
a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png index c5e2c4618f5a51793b58361c29e2c50af6287232..25f49d0762946982507be9016b3f7d51024f709e 100644 --- a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png +++ b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41f82ad8d9062de74b0a00d4325c47c08d00bccd2ac442b0e1e7a5abdbd6380b -size 696077 +oid sha256:30c16b218e57b3cd1b9ddd15a8bfabf88edb9b895159e8f89757b5f64d60fcfb +size 1367724 diff --git a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png index a564a0675e25a273ad434d72919fb036b3460241..e00a102f9e21270babf4ae3aa773c8f838bb6373 100644 --- a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png +++ b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:758fdcbcd5de3952c2bc77a4adb2628b39d1ca34418235ee3f9aad9d551dc395 -size 1114152 +oid sha256:9e68369f843c4156f05fc5eee1a71eed1de6be1eb8fa78d94420435296cf9493 +size 1963200 diff --git a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png index e862369925a2dedfc3366d5d3e3dc66f7f434647..78ba39ad8f0914d3020dd0019819690a24bb2d06 100644 --- a/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png +++ b/images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c0b7835fed7d84f24f2d08065556747f16b273c226c677299250bd399d00e64 -size 714208 +oid sha256:7252f33977fc55b91b9534ce8205e29244515a33a86c057b578ec8988cb1bd55 +size 1340975 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png index fc433f6f95ae6fe547d7902a304dda27ba15637f..855d8c7ee61e7697dd2a11679a7766ee15cd6682 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aed55744fb818786a8e4db17011d49baac1bc1eb93ea05ab4745ac5452109843 -size 544505 +oid sha256:ab2a5cd9bec53e6165c1c78e2ac4b2002ba0e0bb8ce70578797ae2f0514e6431 +size 542783 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png index 18e730df42055ef73f4ce4341da1d07493dfae78..6c91e107a92fcb51b890a7350d74816a88b900ea 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7188308f2738d45f1831d7310845bd2630a6e30183cfe28230b3537c74c8f153 -size 609888 +oid sha256:ebb7d7c42c7dde22c917739393ab667c9e6a0abf7cbc30e572f601be770beff1 +size 1144583 diff --git 
a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png index 2c2f84fab297aa4dffd45aee3be4685d58fc0ded..68ab661cd6da3e02a3b52d97f74e3091592a0e72 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd47887490a73d37f6f1aa277cb7d02902e5c96653ce7fd9244def8b924ca489 -size 643400 +oid sha256:38d9c15a6a7dc008799c473004351a562d17d62fe4245a6fe133efb7e106646b +size 325097 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png index 27908be108bfcbaf1cbd89ca988027c3eed5a0b1..e9f93bbaf79e4d147990420ab3f19cfa44ead808 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccc2ccd6c677669bac1f2b5222e0a888501877c0a4cb85e9e49347dd41f752ae -size 644786 +oid sha256:6ca0ea7936c4367d88cc128e031ae976208a0963a4d9d800072b4dc3f59666c0 +size 1110158 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png index a4ca8ba77abd0f452b74335363f7f97f7ae852fb..1556949502006415f27ffcbd37ccb1dbc6c31f6e 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33e4512fc440afc06e35fed34ee09488970c3c94696d166bc9d7b2a62e983769 -size 596739 +oid sha256:00c6fe6c6260a66ee09c7692890c7968b2f52ea577537c68fe817cc3bd2a044a +size 1075975 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png index 8a7ece3c6caff6528ff0631310359d8f921f171d..9126e87009c0ffcefbc712298dc32357412145ce 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5bcb886696eb7b003469a7df30d6350e0e5f2f1ebb96c0104169bc1ab77c071 -size 1136867 +oid sha256:58b2f5ca527055245e091067a4e702c7a7080c52a866559c36f08ba6c2430b43 +size 1100045 diff --git a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png index 9539385be38780a8926f210b6f7d0266c10d0ef8..6a476715817d9736e6344c4c97eeaaa25226f34f 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bb6471c9ea35688a3cd6504393793cef732e7ead0383180bdea3bcdeeb697ef -size 346141 +oid sha256:4fd047390c3d992109c6853a0decc2c0d0ab4c8d7a5e1f05bfc0ca85d7b6057c +size 235582 diff --git 
a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png index 082863aa793d26981c8f537e2b2a416c10b72993..4868707f2b8b5b9b634e2e3aeda5df92b5e57fd6 100644 --- a/images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png +++ b/images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98742fe42fd3f94f9a7b04c3a3e38863a73eddbac67ee07790147c81ebdc4026 -size 596130 +oid sha256:6d3eeb37e573373d288f706a0213519dcdaab4753ed5f59e10855424e185f2a6 +size 1075191 diff --git a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png index 66efa93fa7ae0fb57603df5d067d535a7eeb2060..81d03b527cedfa0c2cc2267f22a6b31b974f6446 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bf398d65d673e2d71e47a3ba24272b2bec1af43d0c966783c0f1d2045a9ad81 -size 2298635 +oid sha256:edbc0affe6d61abff1e054804f63a0dd102036f68f0934ae1dd800b3ea761bf8 +size 1943811 diff --git a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png index 500aa542f97f04941514207f9f98e0ce2842fcb7..684002a80a3c4361d8ceaba35a651c71561b53e9 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:336979799ff3167a7dd26772657e0fcd33a9c4e3dbcce347ec1dd2613dc8819b -size 903199 +oid sha256:f14587fd370381ef1fe6ed562ef0ac85167ed1c79b9707674679730fa50a1f94 +size 1144811 diff --git a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png index fc00903ba4ff6b50d305fb1218d6879cb7c4e5e5..cdf45933ef96abe4c8264f5b80629c46ce9f3788 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11f5ef8825d50b0118e80ccef309920219b6b32b23eeb60130622dfc86108dfb -size 2955929 +oid sha256:6fa06797f01fb5b3487bca288661b70794474ef6bcfd123ec4f08f682d66c957 +size 1996023 diff --git a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png index 27e794aaac6588fc21041c9080495d3a60870c5c..0a1b2338931570cadf7e6603e1eb9071c3c81aa4 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59c568eaf96198ee64148eeda6d27183b76b9871174e6e1ba87e3cd053439f27 -size 2005832 +oid sha256:9230c0292c1d228c460bfc968ffaf8827265aafeddbe42f6ee177ef3e44b7e03 +size 1506193 diff --git 
a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png index e5a20fb4a0eb55b57c9966320fd6dd4171b0783d..de2d9ea65ae00973ab5bea029a011265f532c914 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52fc0cff70a27dae43ea49ddfe13729e6bb37bfbcc0b0b03cd930ef6a8211274 -size 671466 +oid sha256:747ee21db073a4815dc86d78b545f30b2a1a88c5b6e12859d5d067b0dda36e02 +size 564697 diff --git a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png index cafbb6fce186bfba7e9a283ae443f6e622449eb4..e4363e02f56ef0dcb72a16a247a049a72c213003 100644 --- a/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png +++ b/images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bad476a4113d31c6d283c1715c16efea06620349414a9f0da43e4db7f3cb704f -size 647421 +oid sha256:475209d2f78c838f17320973e27d09ed5bd84cac11cd7e8dc647af77a371d8ac +size 571969 diff --git a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png index 6033314ac11db527a275abe63d33d6e2fd292c71..0dfd61b2a6ef05bfeb1c9349e4c0ea7ac4c57450 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f864448da9268339825426d4f02ca75242ebd9f9b95a4fec6091903e1db857d -size 1286552 +oid sha256:8df9cf7291d7fb72faa822cb467aace707835bd0da0645adfb10b1ee59a2909c +size 1744886 diff --git a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png index 5aaabfdb8c0437b90febecd24c742f9d105046a0..9cce691659ec1cad21a9414b0d12b7897207be95 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef4758045cccb5e824f0301ffe95f49ec99906a0259aa2a7d2497b203631aad3 -size 1498918 +oid sha256:d95853feb03fef97be4db2d4dfe65e0ad8290a3c0215a86be9f622d58a1bcc55 +size 2061183 diff --git a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png index 768ad6b2f327b47cf4d917d03c0ab0e744c346b9..c64435e95ad7f7d4a251f5272864082a9f0a92b3 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3898c646405b9fe8a73175e26fe454e4f0d5ce1b1c77c4c619e1b89232a9df31 -size 1288473 +oid sha256:0ae516ae18a8eb12c1f7da3d1337e5752bb6097dae55b2a9a68f7b41ec06711e +size 1420850 diff --git 
a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png index 69e7d77ca439b311b5c79f28320d08d03abb225a..c12542f9bc66781d673a05028fc2023e0ab4230b 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4825aaa3481546e755eb723502fce3e1793cd995a3c7e6c411f8be7340475b2 -size 1213668 +oid sha256:b8ee35f2badebb1ae0c9579209cd316358a88775ac90b8b14c1f26b4fdf05a3c +size 1711221 diff --git a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png index a06d336f3dbbb8003231b923f164eed13a4dee00..5eed42906bc1ca0f02319902c090aa6b0911f2ec 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:598921e9b01b6895638098f457c234effc19bbfccb6899fb7883dcce12bdbe72 -size 1477776 +oid sha256:bded40ff583bbd6f566c97a4b66864a3274d6f59f2b3b3f6f3131fc98a3c265f +size 770414 diff --git a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png index 764275ffe052bc9180af39568895741ec3ce32de..0175787ecdc979adcbefa243a2dc84649d67875d 100644 --- a/images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png +++ b/images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3fcc53829d60749fff2291c393432da7dde9052672a780c432dd972b4aeadcd -size 1496891 +oid sha256:c6f00e366c13e30dfaf5d3d171a501a5c6bda0b1e2b41993ee51d7b8941f2953 +size 1990860 diff --git a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png index f9216ecc1fe3655623a98738646fdf42bb7ed30c..1e4cce015121bc170f03e055d62a8f9da446b871 100644 --- a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png +++ b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8babea7ed4edf04fa9c15fb39d144709d69f5735a32af86d1390ebc01ee0622 -size 2497388 +oid sha256:e8b73f6829882675b392977d167fe37a33e04d8acfd8eddb4a4427df81e913c1 +size 2735302 diff --git a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png index b543a9fbd6501171a119f4eebb610d5ab1b134f7..7fc751ecd1b8cc6d8c5c44af62ba2349cdc531d2 100644 --- a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png +++ b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7789b02fb6148dceea5f3e01b1fd8ec283068408d445f1d20ad0e3455015384a -size 1616958 +oid sha256:06b56bf5dd235898995e2f3c2f7c5b50a63f229178c334152bc76f961e9d227f +size 2396856 diff --git 
a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png index c664c4f26130f24bfc72e228c1a433d42faa6163..6db4dff79d8a8bc30f786e562064a0c1695f92b2 100644 --- a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png +++ b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd7daaee7b6db95b1e33321b0784052d7c39df86ebf50c152ecaa3406e1d5e7b -size 1431249 +oid sha256:cab0a0bff3be2679f2a6ef8656b7a6e15e3ea8bbf6c75f39fcc312a2e39f50d7 +size 1431029 diff --git a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png index 51391d300e06340f489f3b1c45a72272ebf506a0..1a3609c5e970a4d302f0545203f4a8d91b052c67 100644 --- a/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png +++ b/images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:215862edc45312acb27db98f027b97ba79bf9b2aa8516a25dcceba3520fa5530 -size 495918 +oid sha256:37f5a970bde71466b9cce57b6ab21d98f23c128c160f11cad66e8f3052765763 +size 1762453 diff --git a/images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png b/images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png index 74d331a18f6ba8287acc58039ab04d37a05bae1a..7f82563d78fff4823bdbe51433c2febd2e12bbcc 100644 --- a/images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png +++ b/images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d89ae938bbe7a9b92513480e87ab007d54d9249a4003a55a6d4c1b37b63bfaa -size 958869 +oid sha256:c1f8199c8460bcb232b55adc93ae723296e5fdde571978ea549479180b90dbcf +size 904808 diff --git a/images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png b/images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png index b66ffc1c1bb961c70cb7c43f88c58728baae2e0c..0cad4f37cd7c7d9f9a0c6958e75d08075db413a9 100644 --- a/images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png +++ b/images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29bd4ea256383e166cc9d6718eaa84706c281e9694b6aac4c6db60c70b1a199e -size 955447 +oid sha256:29e317409ecf52e823bf7d88959249cd2575b2a07e0c6b5d9277f537b77a36fe +size 854726 diff --git a/images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png b/images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png index 0f367e014d8408e1ce9218fe849e47d644c20f39..59062edfcb78b838a49c5a70e5256a6abe74b166 100644 --- a/images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png +++ b/images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c28f966bc9ff7f9d54c92254ff5e496a3bd330b49a891a15a2203a21b76bee1 -size 1395270 +oid sha256:8953710c9452107da9e7113a0475445f8fb5aa29c024c750460cbbccb330b691 +size 990605 diff --git 
a/images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png b/images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png index 19b8486e8b49792bc26e8b2b356a039b56fc0c6e..35b6daad277bd305e57f50dadfee7d892b5d452a 100644 --- a/images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png +++ b/images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fbb45ff4a1bfec1cbb6d1cceb12265401039938ac181c1e58370ac3c342149c -size 1401206 +oid sha256:6d3424f959cb7214f349eb51883bfd561f50d3219c34ca866bde0cc8ac6b23af +size 984775 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png index d047f87d2aeb71bc76fc5420cd7c3f14eb8bf273..856aea31c64cd6d63fa3ce2356c2b8192d5a54e0 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e396694338fdc673648c98f7461dc644173c6a3b5ab607b28987ca97cd97b2f -size 774575 +oid sha256:4418193f5b4a01b250c65347139dc020eea235c565de96c4e6d162a9cf3d09f9 +size 929280 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png index 4333ef70eb170efbbb46d4e469d441db075fa424..675bbd1493611cc240d281bb8baf9f71a494293f 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a1a9b1f78f932a238616e215b93316f05bc25ed5947151a13b3ca6cda1c2d5a -size 777317 +oid sha256:8e32b34568bb77833a4e406b36e3737c2448482c39c5c6308be483588fd9ac34 +size 827513 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png index b04a1b3357dfaa0ee017eb2da03e05e8bedf1d6a..8097dba38b26bb999dfea182b90d632754a87872 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aedcff5783efe9de31ce9de3d7c58e317210cb814b4a00dfd5561118135f7ccf -size 337982 +oid sha256:335497d339f654db28aa8a4c4a96808a4e5f5356bdd0142175b56fd4427e9145 +size 496796 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png index 04849d310c388e21445c3407cc251557bd002324..e74bbb00579683e6ddcf68527c145928f369cc48 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69d2dc12e5827fcb3c25df040c9279f6f03554c3c856ba883ee377b60a17d737 -size 331379 +oid sha256:d3c4fd6c6656f7657013a9e8256196f513afc31006240b60d02e0a6f1fe0f649 +size 433000 diff --git 
a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png index fad30a9e2389e32869d2749ae02f1bad3683ac39..64b87c7f79b52b514b03905c8320f4d014c4bcd5 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ea1c6d8f366d92f6807c221acf42d32bccf07c28b8712a5ce7619988f495b0d -size 633185 +oid sha256:772be01c6f220327202f91b40e7e4aa58b09efda429846b103dc74912fa0aca4 +size 764278 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png index 0a4843330678b6de5d25b42916c0d129a4062c1d..6df4aaa9f5d5a4a62b54f03eb3692511245b7ae0 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f968cc8299ec72f9233faf72afff967ee91fc17857ddc97544fbec4273b9825 -size 781926 +oid sha256:aebb5bfa5547a2d1cd385fb2176428f7ba570afd4ec24947a58b18a126790181 +size 831871 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png index 08f7103e532131810739cf9e563586d084f01670..be36eb1b320d2623b949b3e47001b2323f083a8f 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9b41c86e07f6c9e019bb4f88710d1d53f8f9f52293bf8648a0db72552ebf6db -size 775157 +oid sha256:fdc6958f41dd5df3bf76d4d77be3826a4c1365c29fac8a76b1bfcc8d0f3f686d +size 901281 diff --git a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png index b26355aeaec405713d6d7b820f0d90b5923f998b..615338add659c0de326c72156c58cbf2f74213d2 100644 --- a/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png +++ b/images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7294943fc59b976ea55fe4a8f102d9b9645f89163cb1ad6779e26967fac42887 -size 1456334 +oid sha256:e93c70bdf2defad39537038f450bc551e47cc5b425c4c69cf255c5e25eeb0e13 +size 945950 diff --git a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png index da9bad7d3e696a7884bbed29cbee730d1f10038e..4a5418aa69bd3121ec4b674244428af2198e0db4 100644 --- a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png +++ b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e40e84e4f78f96f9b904896032214d82e38ee53d64d8af9e70c79d541505cf60 -size 873768 +oid sha256:ae085a04f3edb3eb87de00cbdc4c3c9bb8769d61fd99105a94d4740ba9bffcb6 +size 972111 diff --git 
a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png index b5a4b509801d86d27f20413a11dd29e89b4dd267..023fb82c605a04de71758ce83d27e0b7658a352e 100644 --- a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png +++ b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e7a8297f93fab05fb01119c1a8dce0a4988be14d7a2a420e642d6f33a33b0fa -size 829498 +oid sha256:249c5e1025fb06cffa6f522520d4b462371c70b0bb669356bd88763c622fdf2c +size 1657826 diff --git a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png index 2f0999dfa0e18b5a38ddd66cd5c68464cd8f64b8..43b56b2a6d69b624bde57d3361c2d8663b492c73 100644 --- a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png +++ b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd22614a4d47b2a681b18d10f2868cd25ebb119a353e755b083ac22dca6b6566 -size 727864 +oid sha256:22bc530d21ff9c06d37831452f80974c8d357463c56dac315065353bfde87b93 +size 1149291 diff --git a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png index 92ce999b40a6bd03ef2a1adfac745e806bd19d06..083943e863b6f68731966e9a831db04490472b4b 100644 --- a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png +++ b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3c83b45cc60779f219539c0cba5b83cb2277dce74ff8b6bce8b24101c124002 -size 2684014 +oid sha256:820fa33ded2e7a1a02f0177321ed8728f17f730a318a8f906566d40d9ca5ded3 +size 894805 diff --git a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png index 50e8b80b7082b4ed37493f26042852f379d5d1fa..8092b52e057bd152370f62bf1b2ef127a3fd4ddf 100644 --- a/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png +++ b/images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46850a568caf8240499a4817a1db82e2e1d21d86ca61fefc272c46d14387ffc6 -size 2699012 +oid sha256:4198520ddd729301a690a33f4929a00bf8dac0a7f9cb05aa638ea4484f8b1320 +size 1370134 diff --git a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png index f9216ecc1fe3655623a98738646fdf42bb7ed30c..2ebd447f76c6bb6c4a1980b98c43276143140a6c 100644 --- a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png +++ b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8babea7ed4edf04fa9c15fb39d144709d69f5735a32af86d1390ebc01ee0622 -size 2497388 +oid sha256:8a69a28d2314204acef6c5e6758e710d7ef7a60dfe3f4c026be6ffeb0379c0ee +size 1778290 diff --git 
a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png index fa3255f7a38a5f56a6f4e1ff84eebc0beda58aef..89236160586babb103295bc4046ce01641e756cd 100644 --- a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png +++ b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a893fbdc651439fb2e4235ce4453c4a42bfc07c0f3d86f2f5f014e8a3802295 -size 1636499 +oid sha256:d9a0d3983bc736a809b99346b1230cb9b9163e426f6353765396aab603b89cb0 +size 1044883 diff --git a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png index 6647cd755937c9e8e7114a968ddd5516041aef17..2f0fed7a61f34b070745130df9032e103de682c0 100644 --- a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png +++ b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e44145bfb8c2992921d26fe6db9138c3180c571c9b0ad3c8e0ad5eb03259833 -size 2445643 +oid sha256:b71d757bc41b6a15343e03bfe79b8a4182b1c84784780551d4986df7d97abdf2 +size 1669741 diff --git a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png index 73338d5c4392362ee23d77959d959f1d3e450ded..6e1988de4cc6e28d366fab20d68ad188b7454b84 100644 --- a/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png +++ b/images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d351d9026317cc9ae51482cb26156cc20e09d30539e7400c695954494424c704 -size 1908749 +oid sha256:3af0498460a16e8c350eec61df7365012d5ece9aa925e6e9a70283d5ee4184df +size 2262876 diff --git a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png index b7e8c9706d335499b0a0bc0fe960672dc6c2737a..a1178bd166d32a3ff4bc2d0d867cc75264ec5e82 100644 --- a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png +++ b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:506fb16d5584732dcbd5bceadff34efb6b96f70bb983d1747b8e61b56fd5e5f4 -size 301349 +oid sha256:ef1e04fff5ee61d2b48e26d23ee8c8c3f2d875c299947acb9e49c503ff8e69d1 +size 282788 diff --git a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png index b564e359adcdd6dc29e7d9b8be93d8a58e41d03c..36a4b26924247858dcfec981d035d1f6780d9833 100644 --- a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png +++ b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aab43197baa5cabfd707333c6866d66c73f37b6a6f0b6f2d632402a0e9ed6229 -size 456004 +oid sha256:bf5d7d68d881521b3c142dac7233b91bc34c90d11de887da3f445064aca8f2c2 +size 336305 diff --git 
a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png index 55806f922ecd5af5eefd4e82c95e32e7e47ce1e9..0bc69d934bb7ebd114033afe7d22e22c18d472b1 100644 --- a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png +++ b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea3e0a6e9fcbaa3f4913747ad06cc92fc5545fe136b9567bc5c340ac9484ebdd -size 291061 +oid sha256:c44dafb4091e300f14c43ce5bc5fb8a2f83749404945d934b044633c27d1367a +size 300844 diff --git a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png index aa81ecfabc8dd78f68ecf980bbe5a2d616c56840..71e9cdf1c67441a28311d0da02b230c41c5701b7 100644 --- a/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png +++ b/images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dac39e709cf9f5289c738b163ae9e870cc3b2339b644676a6102e5d1eb321fed -size 530988 +oid sha256:f0d291f77f93232eef72b4c1bce72b02fc27965ff42d72da0f7608bb167c29f4 +size 531914 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png index d31a1554c77620a02f7b651e070df404b124a30c..c8a81a9c61bc0946d1d4c995447ac7e9d6a3f19c 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23b9323b0c2e56dedb40cebdbcfc69729c38a0df194319cf511c3e63b620804e -size 520127 +oid sha256:683742c37be3d2f02324f56df1e0ade53bb061fc6681472687c2bf28f5b9dcb3 +size 296391 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png index d71341abbf86dc7e21c801400a1588f3f4a3444f..82a64ffd257894042b0e6c89c64f78e9629c56c3 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef8b1fb89c2c947e66d7a53fa707bf2363550cd33435e6787158ce7c17aad18d -size 819801 +oid sha256:010a1c4bc62ed41cbc0a576b866f2ef69babc9298dae58d22f9a5603edb7f7a9 +size 973938 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png index 4d531affbe18ac085ff8baeeacce65978477b7fa..73d570d8c7efa74326eafe0e9f5ca4ab83b6f595 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7bdfbe311ce7c21069332573a695e383c8b6a54879ad3256df028bea28a780e -size 797476 +oid sha256:3e0778ddca1ea09cbbee097f03c3d6c6516be0d8cb357397925588ab8e1c97e5 +size 816082 diff --git 
a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png index 36248de756c282c5bdceb080d45f825508e978de..4b8c0a4e1ed104a725f0ed595f1c554760d74dff 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7c8b7a5e2387d07b436354b379a18d0fc8c56010f3d9b568a49249911a4d1a5 -size 540013 +oid sha256:b956611c657a46be888fb24fd43b727905224f478feebd3c425fefa2e2a5e855 +size 507298 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png index 49bfef1b3928ffe1406a43122e5f597da90a1762..30cb4fe790f0c710d39a8742a8c705d6bd7a8a94 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bdc03417623b60cf1500035fceb608106ec44abfa29db705e3812d2383d15c6 -size 802357 +oid sha256:8d97203b8d17304a9c50c6c6cfd4e9267706c018fcc982c25436e2e505a159cb +size 848767 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png index a2dad4e5685cc7b5b64f8dce12f08e743f36de7c..1129732a10fb86f228b1bcb361406031f30516e3 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:253bb1eb91b3f354f368a37462972c5c8925f0e23cfbd68b1947a2b21292a5dc -size 857086 +oid sha256:3c6d1315b073bcb60d206235088ac25e7c8a867b3600134eb2ba933746c49ebb +size 1648230 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png index 1f74a4df0c6736e7a2ab8b672024b6d9705ef9eb..d477c677e5eb76948d131506d877cbfdf6ccf2d2 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73f8505a966dba0252ebfb627e4138003ed3d341b543d3af47cf86126672378a -size 810470 +oid sha256:832ecf7d18f56c47dec877dd920d744358c767df11734ba7351015267b906f1d +size 1009666 diff --git a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png index 857a86b10f18d4312fc85431439d48dc2dad1b47..20a9a1d97f6ecc66e465e178bf6e1da664272b95 100644 --- a/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png +++ b/images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d85dd83d09f30664aaf031918325ee64e253280867f5bde77e03bc238ae2e595 -size 264459 +oid sha256:a0a5f9f0964d5c8555d2387664c40addd31683fd7ced474ff5cc890cf7bc71b5 +size 403529 diff --git 
a/images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png b/images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png index 33cac1f3cffc2664f861bfbf04c66bf11e58f71a..85f20dedc01d75a7838724d968f8a007ecf257e7 100644 --- a/images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png +++ b/images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d70db8f70d07dde577f7f4df65e4644dcf47fe63ff813e24c771c6b2ec4e80d -size 1477901 +oid sha256:9c790a25e4634478e426524e8007f37a70de20b026c792880ebb28d006db9206 +size 2461671 diff --git a/images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png b/images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png index 680aeaade68293ca4b20d3042a3e5cc597aee349..ea0a91d45ef54a99f2f0718f3e92982877341578 100644 --- a/images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png +++ b/images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1acc6544a20106a5d41f696b10fc3ba8fcc96fe935c28b01dad02f18f54ea04 -size 1325539 +oid sha256:9ff80c85661c0aee9a8012bd52c7422fa4b7e00b8c4668c0acb37a04f8e63952 +size 2322114 diff --git a/images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png b/images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png index da8ef68d0796abd04068ce69331430221bcbf894..d465287c00cd4fa40387e9fb4a3899f4c899b69a 100644 --- a/images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png +++ b/images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61b9037e79e42aadffd237009310a8006dc2acbb31439ff8f6a8d7de04802a02 -size 1952344 +oid sha256:2a5ba024639c1d6aa41c71d8441385d86e987f60ff9e01995d0f93aa54fc81bd +size 2347139 diff --git a/images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png b/images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png index ebf1d4728ddf3b5b44392ba5db702b7499bac143..0812394a1f83de28ca79ed3f5ac2eb62b8f91937 100644 --- a/images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png +++ b/images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be0c2fb3a9ae1b3e922166fc780bc4b82918d1ea43bd5e3281c537a9af808e48 -size 1958456 +oid sha256:aeb673daf723bd190c5c866b99f3c8b84ea42854e6058f49a5883d978dc4c396 +size 2348910 diff --git a/images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png b/images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png index 5e64848b67deefadde007dee6705539ed032b7a7..3ae74bb2a98735c7441aa25eee3ac68e7458af7d 100644 --- a/images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png +++ b/images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:650c4cd0dfd24c05a3ceceeb11fbe5a5cb1c4070f434cd6fa70c9a8738d3c82d -size 1686894 +oid sha256:031c6fa40c055216ef01868dad690b19710ee50babec510eec98b52889417b9c +size 1176689 diff --git 
a/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png b/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png index 37e3b40c63ab26f116aa06f8cdbad82240d0bc23..f08e2c7c97cdf984721c01033e685f18c8126c17 100644 --- a/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png +++ b/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc3ec4b47e809ebb1a5a4a9f8df2bda384ca518865b7db51552fd5750be2bc2a -size 2028898 +oid sha256:62be313a048abcb416a8d450365fdb8997e4bc84eb52f20f31c45a455300f15d +size 1542779 diff --git a/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png b/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png index 9937f51e22b2eb74ea3c0188ae2e7a57bd5ad0d8..90ec7b4b724c219be27193e6e714d2a1aa0f25b7 100644 --- a/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png +++ b/images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29b8c7642070dd46f7a206c62c9ed3eafb64f668ea6d2deaf7484e354e324ff1 -size 1522345 +oid sha256:1da4c2e431fad5fcc84cebfb2f4a3b6d37b6c0c6e9f0b21a191c09071de3c0eb +size 530647 diff --git a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png index 8de65895708cf46bcae01cde4b0d3b65696728fc..989d489303788a4fd153da2ce6d5772eaaf762d9 100644 --- a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png +++ b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79060243a553212e5a9c1f2ea3f259b1882d21e05420b81618b31e9d07051d65 -size 1779315 +oid sha256:f0e38fde2a3287c07b52b710e2caa2e4c46467597c1d0078583edd96a5c3d001 +size 1565903 diff --git a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png index 9660e2739f32f1b6570ecfb5d00f8265167fc58d..a37ae06e61b16eaacd981c1a62acd9ce89a294a6 100644 --- a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png +++ b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b915dbf7285498d59be62a31889c7f0dcc7f669b0cc22cff915ce163d72cab5 -size 1298659 +oid sha256:850978234c5a0ca12304b5a02cf3a7389d381a8a1ae42b58acf3cbc8351765f5 +size 1517973 diff --git a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png index b9c78d787710a6c231b32eadcc03a0b871601287..906ae1d3d8cdf8bc498cc53e11ab2d5ad288d0f8 100644 --- a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png +++ b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7814976d5e42742268c6b0b6efd8467b58903c4dcd7b49700e132eb1f9f51b74 -size 1994241 +oid sha256:a0f0f181506dfe7727aa281c2cc77f20bdcb510e9aadcab2ab02f2ead51f4f17 +size 1042583 diff --git 
a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png index 5ec0ad031b6c8157712bdb4fbd7afa6a8794ca08..6ca34e5a6809b27c187dcbc5000dc8f38efebbac 100644 --- a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png +++ b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3780653cd1bd40bd3d4e612a0216bda1748784f1aec15c29bf02c335b97c63a -size 756166 +oid sha256:cdd174b7aa04b03c49086410635f063a6f14c9bad719024126d323a7f9820fcc +size 1039315 diff --git a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png index 1726851c8b328350e92ba3ccb53b381db6f62adc..62081aea23ebece735d372c45e8067bdaa399cff 100644 --- a/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png +++ b/images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:274e41de8dafe90b88e5666401418af527abede8950b53334fe48c916d10c8bd -size 1612424 +oid sha256:5472748b6b766bd4b39c59cba12dd9038c06cf0720c7e41d9c96a6199ffbcba1 +size 2231817 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png index 2ae50785a8653815c1b7fa1184f17f870496e9f1..9f0cdbd84c45b45cda9911ab88f75c92391015c3 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b41907550d96d04e5a05cc41395316fd1d22ffae92d002dad3d6071d85628c5a -size 799420 +oid sha256:fe250f143b4c7a035c415b93af9be3bd1bce25da76776037de0fa5dcf8b778bc +size 621109 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png index 595f9beb5eb6da1d33398f1f606423b215e24e17..d613024d606dea329133363e464c106981bc1636 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d080187b30287a3927fed487a606b83ae6ac6a49f69669b89c106e1df42f0e0e -size 871564 +oid sha256:faa39feb12c2904ce3d74b3197df34d7c23bc70a27d2ac18b63bc62ba81eb158 +size 828289 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png index 1faacd8b08ae1b3105b11db3d07b0c1a0e244c54..6e0f9142eeed17837c378b90c275018fd4fe2a0f 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5644f10af2d4b28c425925a0e488c7827f768e80af86a821d674e10e6ba598e3 -size 884333 +oid sha256:dfbcb75690b445a191489ea1568f6d97a6bab2df0535f9ea110f50349b3e5810 +size 878067 diff --git 
a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png index 8ec856665c35dbea7b95dbaa5fe95e34befc79d3..b68fad03e80a491254a577dcc1ad15c25f56b2c3 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f99597d2d27cbd2a05c8e14d7970e3bc2dd94876c14e6a8af573d81837aaff66 -size 892140 +oid sha256:92d61b9cf99ff765ba8158812f69976aec8258525ee43c4f6e1a6da60b360a57 +size 883969 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png index 29b23db193a4bad5731979e4175f42c34e435b65..2327cdca101782ed1dd5de91bc38292dce230561 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:491c8ce067eddbc505cbb3a34a8cda20748e5eb3a515b1127c4b2450a3291297 -size 881817 +oid sha256:987ee3d39f23dc55b8e03685c5105f4152e98aba3c9582d107fe3bdd3b084fb5 +size 844171 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png index c297d1ccaa9eed439b7423951307fa968eeec60d..ccc9896dd9c6d88e7edb33c7489c75bdccbfc83d 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29e991e714a18ada3f0cc003cf1c86ab413b9219d5462f1d012eedd697644ab7 -size 1560684 +oid sha256:a4f82be020d274a8c691cc18c5cf928dc057313e48e4945d92cda843b654b4a0 +size 1691273 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png index 7a2991b4899b62a46800b570bcb7bee978e1d360..1a28f6b2a6b7cb4fe7153349a885de0c8ffa4bfd 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7be14236493019d4813babb27a777c0b2cf79f323df6ee368551da2c7f858a5 -size 427607 +oid sha256:b6c0a415c9b104aee40098076516d4b757351e1b38352b48e4d1b76e47272c83 +size 551742 diff --git a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png index 0cf5006ef7f321495803fbb9c8ec0de9022d3a4b..0d18578cb0db0012488d7655014371267d288211 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b053c6d50fe959fd499d0b365af5d5defe2bf4e9a1682bed06eb3ec52e1234c -size 910367 +oid sha256:e0af6a8d1bc618f83c29efcc4b35a3d425b78feafcf574c3e6add5788a9d94df +size 544457 diff --git 
a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png index ce47adc9b7bc3b945551f450c30c621923fc9f96..0e83a32212b2ff3c8fd50334b018c1f3d5780e2d 100644 --- a/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png +++ b/images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11f207f928fa5186ee60314ac9ea43d5e9c2aebd30d7d9419ab7cd06f4b3940e -size 872430 +oid sha256:a219e1af2d3d31324d8ad7bd91d9eba1bc16b5984ddaa9513777323fc3e1d5c7 +size 737437 diff --git a/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png b/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png index 0eef7264bd4565384b89c3a4d9276d31d377cb3d..b56db7f46b9c20410f2501750075b854897a290a 100644 --- a/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png +++ b/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e410d4c07deaab4db4f4bec4b29e363abec4fa9271f4e0b71b6d83a225cb4f5 -size 1249414 +oid sha256:9016d9faf774c9becc7e87da0cda2c788764a8e045727f5f8274f3ac9489f2e9 +size 1919121 diff --git a/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png b/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png index 4169f856521e07ad19f5664f2c3a1b9c112cf30e..c215bdb2eb86532e1e3ea139b85e0dfd57c40b1d 100644 --- a/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png +++ b/images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:971a47ad09ab20d60361898e6c3fbc9c2e28403072dd1224e9e42c66e836219a -size 1255560 +oid sha256:4f6015a28b613f220d0a28074ecd7db7bbedf7baf9ba92b7be3d0cf7f5274e2b +size 980276 diff --git a/images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png b/images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png index aa92d31a59802ff03ae51dbbeee3dcc39be481e2..b4042874a0d6dc3d7e64e94a39369d4bf31d32d7 100644 --- a/images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png +++ b/images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f06dd693102532e30b36bec742dd4377a87cd94b46c102fca7dda44c42a06c9 -size 463835 +oid sha256:5eef25951d9edfa195f1e2f43c664e37afefe94be933daf61e38aaf0681d5990 +size 759331 diff --git a/images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png b/images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png index b48a0d4cb1c81c6fc63f80c1937426d06d174cb7..e13baae4460cdca7140ee6c333b4ada94de54d2b 100644 --- a/images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png +++ b/images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:654eacb926600d72fa05223f66a1e0ff654fcf48dba2ec4063b773ca154ad559 -size 929057 +oid sha256:5a5a0fd1bdb8474a141635ea89e7b83d4a299b4653546a308dba1b845efe111d +size 482366 diff --git 
a/images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png b/images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png index 4be8655a5825be7d432f6ab3f029dbd3ff717843..aa20bffe16a56ca3b91bf57783a9535fab8a2b1f 100644 --- a/images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png +++ b/images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c5f9cc3bb615cf5df2ccbf5490fa6bc2f7780938c8eeb0e23aec4dab358d0d5 -size 3330732 +oid sha256:2502fea8892a3b6f75f49d955212415d5dceb078e7af32b66371b4fbab5f3b15 +size 2383276 diff --git a/images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png b/images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png index bbd6eb5889794071ccfc44e09d5d228732ed8a5f..2286df0822073bc17e9e6f7850ffbcd93dd24d4b 100644 --- a/images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png +++ b/images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86325a25dd8dddcaf40a05e3209e4ab1accedab2619f25f076349dfe9fd65e95 -size 680598 +oid sha256:82a1e46a9a84bd1facd06bbb2338bbc92ef43db2318a42516c92bf14c44bfcd3 +size 1047030 diff --git a/images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png b/images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png index 2aeae78fdaa3ef46f6bbccd61dfa7d9ca434bffe..8738ecfbe12d3ae83a557509006fcbf878ee8cd3 100644 --- a/images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png +++ b/images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9368c6e4ee3b85223282f5d47837e8394bff0c0a64503928079c0f602f7010e7 -size 529833 +oid sha256:733b8d6603dcae6d8d88cdaafdc4598b7c7f2fd620419df53b4981f6580c400e +size 528615 diff --git a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png index 557a9091401abcbc297d6f6d78d8f0c857ecbba0..ffbd608e80f978684a8f33fb2d20a8530d7ad8f3 100644 --- a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png +++ b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fcdeb86e0e74892bdbc3989dfc38bb20cef469832c36735cc0eb734939949c7 -size 861244 +oid sha256:1a0d660d3f92caa42371ea83c8925d127d8b109532ea7ae51c4ee9c2b9f21cb1 +size 739741 diff --git a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png index 019146d9a8cb7469949357e152bbfebadf387544..a8760e3ef83c04a0686e56ccb0b54e60be061912 100644 --- a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png +++ b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f263f5abdc649248d6f613b08c704e1bbe2b608235d7e45366fa5185a4291d9d -size 1021238 +oid sha256:d1676624ad99454b7d8355e048517f0a5055050775bd4032042c4a1a8b3c76ff +size 1022805 diff --git 
a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png index 5e67f237f301c02544372fe052ed30578a03a8b0..20c58ecf912c6a14b70cc4ff5000804bebc96cb7 100644 --- a/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png +++ b/images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c32b69600d7385ab45da0c7a01d83cf502115dcb0ccc0b2474222cdacbc71f1d -size 386898 +oid sha256:81911c96f9b783be4943efefc46fbd78275d3442dad1667fb2522c31c738135f +size 388708 diff --git a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png index 73b0c827a774d3a6777db46a60db2eab8b625027..9f987939e56f9c11adef3061590dfe60e1026472 100644 --- a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png +++ b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c18f3d41afff9a1cd2cd2a8f87ec425793194dd5df83189662c10f1c51f3603 -size 1650867 +oid sha256:abf6e298ced2baf025cbccd9fc8706c5cec009b8a643319d7ce162a3839452cc +size 1661585 diff --git a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png index 3133fac36ca39288fecf7c9ea8edacc27b6cf5b8..ee443c06465f870450eba83fe7369c3e15ed66bb 100644 --- a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png +++ b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9acc4c7b424c6e34fc1ac4adb59c19280c2e589eb11b97853e1045093f9d0f6b -size 874194 +oid sha256:5916bae7c47f00986c3423a215229f6e7d1719b7667982f26b10b97a00cfddeb +size 1257730 diff --git a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png index 9ecb31ecc53394465b4e40c1a6d201da4b417689..897bc2d704b3ef7452746d4c89cfce3b649e10a3 100644 --- a/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png +++ b/images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c3b6470d6870847e495c9c43574fa06d34944c52d37214360f23cf4d181905e -size 1326854 +oid sha256:778fd9ada7aae5b130dd7c52bb86ab75470222ea408915a6e0d64671a2cf34dd +size 980042 diff --git a/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png b/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png index d59d265cc9f863aaaf609717118476e00137c1e9..60ce4c27df6052503f0f3656b4308b678da7c565 100644 --- a/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png +++ b/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ce932d7072ce72ca633cd03004aaf7c862f67ab434b346e3389a7fa8d71aaab -size 1334273 +oid sha256:94cf5368d097243c9cd3b622777450fb8a61212b65d57b85b4c23e9f80e08be1 +size 253098 diff --git 
a/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png b/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png index ccdf2f7816ebbc21e3db777cd8ffc1d8065f07ba..c21cd8bbf2077ba66bd4b50bc35e97160223889d 100644 --- a/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png +++ b/images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:780f0f9ce855ee94eaebb427203a6c28197043877b8ef6234747de0b5e4d40c2 -size 503580 +oid sha256:552639c840a37842c9816941ddd9b3ca3ca3cb44fa9d1a003dda58879ae8f2ef +size 527270 diff --git a/images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png b/images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png index c86d79d930a0a9da791e80e28ccf6c3b968c9471..ab4a110918ebac03f1e064e8840ff50fcc2a731c 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3aba92c8699bb198f7b7a8c8d1ea3b9807fbe391ebe4fe41288c788575c22df -size 1833919 +oid sha256:13f73664790bb18944c9e6d9423115c2a461c021b0dc6651e942f250d0a25a87 +size 838236 diff --git a/images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png b/images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png index 24ea93d84f083d0ec07967aec12ff24bb8b3be9f..505344f865083b9708edfd811381b1707e7a2516 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f50b845db34a7b9062d37f152b48f16aef9e8529783e22657edccf8286bfc136 -size 958212 +oid sha256:c8ec4f9e53519926b11b0b9d4d36eb44c16e424b6044f245b4a863c705c28b74 +size 894905 diff --git a/images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png b/images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png index 05dd1582fa0140e355fcfd260b1695995de975cc..26590b3b8a3f915935bd8f3b2acd0b8292c243eb 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c88ceb58e63bc8ee68931c84408d83e4f164a8d0d1810e94d3ba701a33ccbc51 -size 922001 +oid sha256:7b1174cc0242a9a47fbe3d315c9018f999ad85e3e6d6b099b5b7066e5ed09249 +size 904448 diff --git a/images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png b/images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png index e0ac3ba7df1bf21fbbac8dc8a5200cbba01d01b3..e973626187fe8372dfca377a54f35a68a1e2bae1 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:172d61d3e9081f507548d8d5d9e1702e9bbd0658a869b514369d40a16838ab85 -size 916283 +oid sha256:9affe9e98240f0dcbd86871308a987f56ec2ac9ffd51d338888205384faef3fb +size 1217319 diff --git 
a/images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png b/images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png index dbcea80db594c1ac5d109afe1b27d63f614e9173..afc2a2cefaeff8813285a428d1ba8d2309a76fb0 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f051dd59f6ef8b3bd1b9200f9eb5d48f81f2d7fe572ddfe87ea6c669c0fc563b -size 919767 +oid sha256:92450c892d24d3d1c7794691c2ff8d1dba5ac4a627d5e65cf9241093e4d888be +size 919038 diff --git a/images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png b/images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png index 4540371beb65f669875c094e70233697bfaf56ed..b412ecbb68827c55385709400102b620a294b4a1 100644 --- a/images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png +++ b/images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de6b84da9f61c840083ce65be7def2567664db307492d34fb6eca184a146fb2b -size 919315 +oid sha256:74f74f449fa2362a03f1b3a5b061670671f86c704fe3a0863245d67b8f55740e +size 1187161 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png index b99c2f9c52c816b533c6c59688e10de55a3f5789..b404966c35e170c306d2375ed25429de0a9e42b1 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:966c88a4ede209e97ef63379f81090170ba816581194950c255e2c1e131e0f93 -size 347860 +oid sha256:66d6f2504055b6305d8f95ff7a7d502c37d6ba5baca75d4a7317f6b1035e1c20 +size 577868 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png index 0b3dde51041243b48a0ace200e1572f8db84ae95..ee21ebab8919fda80a49e20aba6586227210e7e9 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bd636040585b5e978b0d358a7fc2b5a54a41e1b9a50fe37e5b12c80e9934ba6 -size 1685285 +oid sha256:3c789c49bf1d35df6149882a20de177483e0fb8afaeb8939d9cf3d84b4a79593 +size 1282479 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png index 1d35d0c3f13b2906a5e29595ebe8eebe543ef311..10d8dc4aa1bd2a727bd0adbe57e0aff346697ef5 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9010755fe8b0d36bb85e2457a5af698ea16c291f73a88f5f300a51995eada441 -size 1417257 +oid sha256:745915a197341f4b720420d0ef40f468cd581425dcc4e43611e088accda9f45e +size 1603114 diff --git 
a/images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png index a7faecdb68f17007fde64c9bbfe060a64236fd85..f6b9b5df6830fd4dafacdfc89ebef9ca57543af7 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a54d85121b1903303676db65989f0210ac172ef9ac06b94357bed1c94cc8a286 -size 329467 +oid sha256:b91db0fb789b0d80ade2097abee81ffcccc486e599e737c02190d5d384a37556 +size 870688 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png index 3a84edd775e2a2348f9f2872f2313dd48e696178..33e2b6e910b73dd3fdbe0f7e50c83eac48b616fd 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:322d9b3107c6496784708fbb37c3cdadf7993e1773a1940488adb0e8cb74507e -size 463001 +oid sha256:a202747df627d9ee01b8e4b6ad32b9684bfd4db9420171b15fa5257c2a0b4226 +size 304115 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png index 1d35d0c3f13b2906a5e29595ebe8eebe543ef311..0984998f379083cc101bb871d1d5a3899d4965b7 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9010755fe8b0d36bb85e2457a5af698ea16c291f73a88f5f300a51995eada441 -size 1417257 +oid sha256:730e790901fb74d6ef517bba8229e2bea307686142c8dc300d4973cea58becda +size 1349642 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png index 8d68c40e7f6def17813f1ed5ab27147061568380..60c9e8b04153e15d0daf177e82a7808dceb5af78 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc280285b39e4f292520de6c36f44e430bbf250991e53ec0694113fc95d0f663 -size 267809 +oid sha256:230361296e7d46dea0fe62853bbcc84bbfc09cfb33bcbc3910ebf77e51172fea +size 795343 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png index 9cefa4f716dee309f615f1f930768ab684174685..e8b5b0c86f27f9f2bcb0e7779e20c172ba3ea199 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7cb867137650760fdf06ebfc010bd8d3a8c4a05d676f55dba8492d8d9175040 -size 302384 +oid sha256:0861f1baaf27c6bd7c715f87b21f5493962cf70bb9bfabbe5daefa1ac0ae9cfe +size 767625 diff --git 
a/images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png index d9b1ce397c7df1d3848b81bf0a43c5899fde7109..2ba63876e4281d8f6e24e2a3e0cee7e0fc8e7b97 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7ad7cc449c182cfbf175c8888354e4eeb5796b9fefedbcf7c0d38e410f93e18 -size 874703 +oid sha256:e0e76aa0b43e60162726d4b6d35f8ced18c2ad290f822428b68e1a403cab78b0 +size 684106 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png index 32bcb9891ef11406d7b2b387468d2e183fc59ac4..c9077934748d5743137aae972032d1365c6597e1 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:387e6f56537af552b0c3d946341d14bd544fbc1ec1715492aa7de3d557c39a61 -size 1166403 +oid sha256:28007ba336c5de4c061890fdb14dd6c3f2b75049cadb1b9f42df73ba0ff637f5 +size 963217 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png index 462b0458d5ff4b875a0a70efa6871fff06541ced..f05dbf692b954471d8c8a207692b05b0571fbfcb 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7105e52587c32809ffefdcaa9bed69964df840f49a728aaf7b9476ee68c189a5 -size 630527 +oid sha256:9cc6d2ed9ae06065861e7288b7d3b10b080dd2964a0457fe4bffc678ebdd3305 +size 984621 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png index 1ec481650e29bb291fc29ae43fc6522d5f020320..a657fc601c0374caff4634f2b102e4f5e6e5485e 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33596148d1c47d073f73fdfb5066c291696c2de6db940706510399da2dbdb382 -size 509920 +oid sha256:9ddfda06e369f0d734c5e8d15cc67dfef246eb688a759fd7bb0c11ddf9565819 +size 470038 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png index 22f55f715f0f183f7665ef135832a21fc3eb9bd3..c77a248b82582324186c9eaea9c360cf9b6e57aa 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ce1ed614cd5fc4b6975bf007edf824029bc833ee372448aa6ac726212301bcc -size 459309 +oid sha256:8658fe19e8829e108cdaf0e81db31519c0a9f1a22dd0bbe3a843a1505173b104 +size 291028 diff --git 
a/images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png index ce60b7c69c7831d02f6e02fc5c49f7de926f3943..ead45dbea3dfe7139b336b8d6f2996d8b2651b6a 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a7af6d7626c51c10ecfe3e295cf7db608fa531266e1ec592f592c24f4bb4854 -size 1052206 +oid sha256:cc4e4d0a3e25ca90d3c1e69b5f00dea641ef88936be954a77846a8b372f30379 +size 1296233 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png index 1e51790ed1031561ae9bc42ded0cd028471e1334..9bd13c6edc534bb3594fd38568dc137023c3da63 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdeaafdf4068af8f2e276bb0bc8aceabe1667a58ad06ceb297de5eb7cee147a1 -size 1237589 +oid sha256:41f09c45c111542c9c5a9d4c8753e59887171ebf68f7cd1087b381fd56c0b7c7 +size 981214 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png index 2851b2321f366682409374ef678ac7076c9797d9..9f402b65ba592059083ed522fec2241d7c476372 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:222924ba7d1522073f7f1844917d14b44ba8816b70edcb93cf9a0eb163c8448f -size 594740 +oid sha256:42e096571ca679774f3d0979693b76370099ad0aa49b8d7c67f7bb3fca76f7da +size 818411 diff --git a/images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png b/images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png index df73a743b64868d504d8586651f2db4c81ed236d..38648be74abe38bdd13517a05e2acc6fa547e13a 100644 --- a/images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png +++ b/images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0a3d1a4f8e62724a92aacf2636b8472cc13f860947e52984e79689d03c6a329 -size 874724 +oid sha256:870ecf3b60c9f33e5b5080d31bf79f0acda3c7d82ff2a1fff866d8d23b499540 +size 943944 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png index 597f75190de1b323daa775cafa6a86b95b441014..675b6290c8d06acfac03bc20496b69bac246eaee 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a988148f358e9c916926d459ce6e4e184699efac4ca608575b3a33ddfda893a1 -size 1339988 +oid sha256:636900ed1aff929b2feb51e3e21e5e75ac3c338f01ce0cfc7645b5ed51c76d0d +size 1297161 diff --git 
a/images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png index e5e18e240f6fce36b621f3218e8d843ab021bde8..7dc0fdf250764ab230d268400f26ca4679999c0f 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e169840abcfbee02150e0f85c64f6b3bf78a577cec7f26515d0be5c7df188782 -size 1049549 +oid sha256:d90de010deec87460d79664ce69f83fdf9cc021a898caabc75b4810f5916b05e +size 1271935 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png index 2b3c44391a4c066d2bd95de84290a5200a9559a6..bca098653d8630c81340d11b994f78257f8041db 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:232929a5beecff129793988648a25d821dc3dd7a39ad38ae25ce642661c651ae -size 306622 +oid sha256:e3b8be0595177a8585f12da5758b3656ffc6639f8935e7241d6fe93f26c47153 +size 394618 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png index cf0ace6739dbea4c1552dd6ff48a5e1a005177d0..3c034849e075a701cb3c1a05f527f0c9c949c0f3 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9bf8c082cbe1876126f032741b08f34b6785f126ee387b2dcc361595771e364 -size 284274 +oid sha256:f85e75c1941eda89da6d9bbd5cb88e91efa5cc49a6f4aeb661f22ee381991418 +size 572614 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png index f1bd9a5eaa721802361ea59052a45c934147431e..9c35d7df8e1a0ece0a3a5a502db483790e55f7a6 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56a9ab16f476b200c1725ae809f99fd547f55a347e5c9facd44083ac31abdfa8 -size 1694794 +oid sha256:93f91f6dfd85bbf78cbd958430e1daac63b3892887c43f567bfff75e9ddfe27d +size 1364980 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png index 4bbeb8127d62530ca0725b438dd25a74dd02bef4..71f09626563764658efe002080049c4aa080e301 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba8d4d6a3853b08b096ca59ad6c97adbbe18f89380e49a52caaae531c44ccc80 -size 296089 +oid sha256:38f743ca19c95b12cc77a37437220b8504e53fefd66494fb5b1f6f988fb23f09 +size 265541 diff --git 
a/images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png index 59416d61a1691403dd6c5d7890e9293004f7e2d4..97c3b466bfc2756bfac09f05682cf7d5c1fc16f7 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47cd0595f2049aa5b5f87e8784331c90128faaebf899b9aaeab203f8020f55d4 -size 548495 +oid sha256:59cd85678c0837031df2c54b3d5d6eb5b73749884ff703d7d17cc182af101eef +size 516898 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png index 854bed7096095667a0a7c9d631a2c5442904e5f6..b994d3809fff4914a636f7a9689d8bdef668a7e5 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15921b134b70724579a81cc5f4b77730b051fdfe3a2c1be00878e4cf63cb8dd3 -size 213119 +oid sha256:392cc3ac9a42aa82644da229fc5a2cdbf7df402b7dc483332e663a6f7e3fea91 +size 235425 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png index 14a156c821bf0159413893e88837bb85499e7292..4d30b5801b1934b66bac7085fcf7b0a65560289a 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23a7471b83d6b6caa8e629a5d3af11575a5810c30293bf272626e2490a0f0757 -size 1296399 +oid sha256:12bd37bbfe2ed5af51ab0ec85c3d115c8f5a3f1432cac887246452b5b7223872 +size 1296909 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png index 098d45786cf8cdb6a13376434941ff5c82f34d4c..17318e3637df5325a79dd784bd3f710f67aafa65 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f484ecae5439fd87ecd409c1334cd78b9e0334e5649ed3ce9e28aead712b862 -size 318366 +oid sha256:580f40aa8a7f7b8fd7656ff95aa30c2913c277ae9886d8b661d6c8f951f46e35 +size 605842 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png index e68ee420fb80ee45f38d7a5535cd7fa4065c41f0..947827db5c36a6e97b2797d4b0b7183d3a69e601 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ee6f95e9a98d19675cc9f210c3cd0f953b0b06a08dbd330db9ce8e11075da27 -size 318112 +oid sha256:5b5147ea3662b9c85e52fe032a15aef64c0d10fc67cafd272ba148001a2d11e0 +size 320670 diff --git 
a/images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png index 4884db1ee82454cafd8515014797f49966042565..a87b9b9a8c9799b7965be1f8365f983d373f9dc3 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6e44b10786fa2350f323a78f526368a081e490940474dba8b6ef196a1d9f74a -size 1100069 +oid sha256:abd5a715a73b41ef72a24a7b23550dec4188725d7427fe5aa865f28fce6185dc +size 1242550 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png index 515c208457210bad23832fd8814e9b496a8d3e0b..de5a4bdcb5d11a786a04d931f179d8da016ab780 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef679ebcb78e89680ef68b71b78aca148b8d971bd0fbeadf9714a028354a619e -size 322721 +oid sha256:c52fdfd21c14caa0f2fc08043411339124cfc11d6befbc809942f0a6a2bc54b0 +size 502904 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png index 4ac2491790ac9f7045a652ec2cdf7652bf2e5706..b5c7d363eb25b80fda0733872c23b9ca991f7da8 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67fe0754083aa5aa8c63c8283fddb11645ff363e36cea71aa37b167bd813a6da -size 287555 +oid sha256:1e052f98a570c44afa2c5bfbc9b76b66d394a7483e1a426a892f63c77334a34a +size 301230 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png index a22be4d9859a7fbfb5ca354fc0dfd794250bf5e1..a203ac04fa21fe2c1935bce85128f27d61917a41 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ed5c6eadd3def178cff082e9ca7f0e61363dc78baa5c8c7c6ad0edb85b4fc48 -size 1051947 +oid sha256:38a76af5c96fef3f069784a4c1b48a34e2e0097ac68430953664149dde339924 +size 1063564 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png index 86490a17378a666309068598c20751a06df9cc00..13d92410635c1cebdb4840c1adc36da0b778d57c 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b136782dfe8471477ae4f9c25c9262e4c8ceb69cc1b015d566327591e86f3971 -size 321028 +oid sha256:4ef9b5af89e96d55d5ffcdfcec3bb5941c12d5095f24653eb2e3eaaac7595be5 +size 628251 diff --git 
a/images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png index ea75668912e16155ab9412eaa46093ae83498bfa..f205ee3aeb4e3c959e329317a590052f5ecc1950 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c41e710cbf830d36d5e97764f76612137629cf5fb85659976f2c516b6f3ab93a -size 338725 +oid sha256:ba0ac280b38eaff5e5af4fb8570c08c54faa027e8f3ff01407d347d7409a6cd3 +size 353325 diff --git a/images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png b/images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png index 2bf09a6a3e8b1cd6cbcbf279a849d3b95732add4..539ce79bf78e739a70a40e3be8069e42ddefce16 100644 --- a/images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png +++ b/images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f39eba5048fd3aadd4cb95cde04939ab6876ee267ab4a76c3dc807190dd87ebb -size 331880 +oid sha256:33d9290dd51d7922d02be686ba6150dc622906f3ca7a81d58567d560eb1e92c0 +size 310397 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png b/images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png index 525eefbc7cdc3209411373267f481a633bb57693..f65a0e2dc9f22827dd2fe6e3138df3844ea96dc3 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59b93a8353faffcbe2276623868dbe13ae424ad462cbee77d7fdeee27e3214e6 -size 919140 +oid sha256:0ba2ef3f1322cdb82d3c94c09057e163f6634751c671fc7ebf465ee71ae31387 +size 954854 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png b/images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png index 7df780da2f19f7d53357014366492b29c9eaed60..1088a08079c820268113af5153708463960f4ddc 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6998faa10fde2e042c944832b169715e1e3f355371f1dfba4531dbb03e715766 -size 723252 +oid sha256:391bb68a349db6de2da3e0a7da11e4144d39c3c51606d59a1f0c236b6dab38c1 +size 618384 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png b/images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png index 77822c72cf1b7025e8abef96dd566a47a061eb61..4b71013872f914c60d9e0fec2fcd3c26494c9019 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6634d633b4448ce029f5fa37c0d39fc2f87f4872d1cbafd764474b74f0a155e5 -size 559548 +oid sha256:c0873e82dd538b8e107d42424e5586c1ec5ab8d6300d22c828890a33d6f2aa99 +size 421242 diff --git 
a/images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png b/images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png index 724577955e56ddb6b8efa5487190008bd16bd896..aa0958776b9329e6d7852692675a3930cd2dded6 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:325df4e4ab2f812f4be2ed8147afc609f1e9175b887cc6c95cfe91cca860e1c8 -size 955781 +oid sha256:d964d23fba16b8d5e5e348c0d4639374390beca6bda9acdb3a9218d7a056239c +size 639428 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png b/images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png index fced50200cd4ff28d6c6a481c8e3a2ae3d2e7a8f..e3d1805c2cda381402cf16be5d11269842cb538b 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6f8a12748d66da8ab4dbc5d6559a3f5f18461ed77519be3fa90caa7079b8b2d -size 722401 +oid sha256:c0399cf618670331c53967d1ae1ab19627655789bae13579369878e09983b1b5 +size 919407 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png b/images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png index deedc2a8e5e197e7506d23a4b019ca189e55534e..755b64f0b02636d2bd00f16d48debfd1ab4a6765 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3eaa4f61a419fe06cf1de8d06634fa21e2796de38ad383877ebca543ae58e466 -size 693765 +oid sha256:8642976c567b7ab614c3ef2e77776cda69c27d5b7116bd795af36ae57a5eb508 +size 510192 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png b/images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png index 9c4b7987c037fd570d7f3711e6bd5a660816749d..dbbd27367e3ddbc8930d65abf7b02dd287b33247 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36a584f753f6f7cd70e51cb1171c3c40b7677e35525cb2e4630f7341b9dd43a1 -size 952117 +oid sha256:ff1c3b8cba2cbe5bf765aece82556eafc179fb034f50c28bb20b270e050282aa +size 1075019 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png b/images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png index 234d7ad1639f03f09d3d40a102756450c7064831..ad27edb33d3ff64b3a844377c4cc737b90451c93 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d81e8e439337625740b2f983828083ea413754147d27ebedcee3c02608be44e -size 900689 +oid sha256:b835976b833f7e17b6c6a8284d7f0e97823d882363e7bc00bdb7696e8554d933 +size 846394 diff --git 
a/images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png b/images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png index 8d6d0b9fce94697c33fef104be3be2a865c8eb86..cc7cf4ef775fd307c35481cceee9ecf71bb46f49 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c4d21599c48c91309b98ef00fd4f01c9113d7accb0a6056f9a64308458ab700 -size 900894 +oid sha256:4527b3e8eb622cca888f92a7ea3a7d8a8e577d630b064d5c1c2c0c7cb53149b7 +size 901234 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png b/images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png index 51b894d7746c5d4914c9fe153d7724a7fb860169..5efd081a73b7ea0905252830dfae6229caaae71a 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be25be5571fe67eb710e2bfbdc1f631db3f988ec5d221ca7b1c0ae897461e578 -size 888553 +oid sha256:ab073bbb18ffefdb647f6992598e5ce72cd4289cb36df8cb5c2f2ca9544e511a +size 1305633 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png b/images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png index 53aec936e662172be1dbdca9dd14e9a6963397f4..fd73b2d4821ba1cc68c2f6ab9309b1ae7608b282 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b32292817d000d6e73dc49166e55cfb70457e60ab583286dcd5f6c0261fb932f -size 900927 +oid sha256:89a38532cc2773e502175be829c9bdc5c23aea744f9eaa553e8b0243cd95c71f +size 1022866 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png b/images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png index d20f3e241a3cc582964f33f86d0709c46d40022c..0dee94cf45abf21a2caad525261a94ce6720300d 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b187f9660f77dab9d50d97a9319c6829e16f63856792c7968955931a6e5d0ac -size 944643 +oid sha256:63c42b813b93c903f07b530d639638047ae6e0b2a76708c839f9eb38d797fac0 +size 931441 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png b/images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png index 0389aa2c713cf56844ccf8a2f38d89d967afb103..ad648670a63f4b2d2a2bf60cc9b58d132fc2187c 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb36b05167c8ac72b58ca9d24425ea7c44720529ac294156cc53ef99c729b5cd -size 914020 +oid sha256:b46aab0038d119092511ebced94445601b02a041a9ed2d8dfb4135c5175602c1 +size 979271 diff --git 
a/images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png b/images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png index 1453edb30e5c9d0b64ed4c7f90eab241767a5bda..4cd88bba2816fa817e1347e018073b3000013073 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07c132a9630f43d8252b36399546cc057deec928b29a414bcbbe6e18c808a9fb -size 896948 +oid sha256:23f3b807d79b55e93788eec7ba2278509d366b81f05148e20fcf92cf3a6417fe +size 1017226 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png b/images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png index 20ed2b63d0ce49a601587ecf85c1cc7507c8d15b..6d83ffb2247c53007ef91518b9fa5361d7b6316e 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2176fccc84bdcc4b72bfe37ca0af11aa8e7ea86d8d6df0ea2d1bc84429ffda10 -size 698414 +oid sha256:e36f15907d64a2be2c849a49f0443fdd136687b03191787c4e490fd1d17f77d3 +size 903095 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png b/images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png index d1e29c3f0f35e2ece170af6f4281580ed7710ba0..715d2f884060f780ada7a6c8b751afdeb7addf1c 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:375403f4c3082c89929978595915557d2ad37fa2336cee9783ac374f3fcd65b9 -size 886314 +oid sha256:bc63f9b6526294a37cdc6f05c4a4368f5ad2927ab05c6b8c138deea83d2ff198 +size 948858 diff --git a/images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png b/images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png index 3ecc41340828ad6e7cb7dd8c9a1bef005e5c72b8..56afd84391c214461f2cad6e412dc45544d65e6d 100644 --- a/images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png +++ b/images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7cc992118272708f1c8da86555f6595b31094019eed611476236d435755406bd -size 710220 +oid sha256:b03ce6e247fbe1ce81b19bb4272b4c661d5cdb4ad57c8398c07c986b6b3749dc +size 542729 diff --git a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png index a43dd1f32bf1527ffde883154650177ffba3ad20..0d12e55b3ff86033c4352ad361bc8d7f28860889 100644 --- a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png +++ b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edf3fda921005d19481a44faeff827ef1ef7cb78603f70694b161949640227cd -size 604815 +oid sha256:196bfceb1d9b5e6207827c7b64f618a32e8c5b032272b61b5218238edd0aa136 +size 266149 diff --git 
a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png index 305f86a730b7d0f5c2ba9b9c169b7e29c02d5a8a..72963a0d6d7872258d0b8069495599cea2212976 100644 --- a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png +++ b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78a5241006bc8b2ccbaea4caa55a1a16cf3d660702bfebb904905f17c6d49cb9 -size 697203 +oid sha256:25277a14116acacf8c8f3894a871e4e3a7a615fc0778d5fa291f55488d653c84 +size 696849 diff --git a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png index df0047da168824220f81ba9d9c763e953f5f3175..86a59ffefe451e068cc04046d6be16989a2da5c4 100644 --- a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png +++ b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12501407e9f83517fee430db581b145cdbac93b39878c95081d578952fad0f17 -size 169854 +oid sha256:e9fd9616ee3300162665ff985d387e535e1be0222fc5b2c2b94fd46c1a2180fc +size 523478 diff --git a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png index b144853339884dda11ca94522695d9ea3b26eff3..0c9125a4abcd30125bed61e9366a4aefc5046c3f 100644 --- a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png +++ b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:322b7f45e0f7f851be5c64b1c6ac090cf5004628c174e8fb60a1c64091bb9ffb -size 594903 +oid sha256:c2f48b7a13a99c8c06734a17e333fcab8da6b68d90620c6d0f191e7db3cbd655 +size 189318 diff --git a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png index 9c7c2304aba3e8f27376870145de9339dd1f5115..c8e737902c3dee7679fa589f5a3dc293c9e1d68a 100644 --- a/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png +++ b/images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2d28155b186ef6f2762c36055f683818b064df3a1cf7bcbf41c2944b7180e11 -size 187639 +oid sha256:3f43698cb7199cd09eb7ef1d2c741a5359bc5122e471e32e8b882153367eec5f +size 186104 diff --git a/images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png b/images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png index b9536751fc82a1ae00e16e8593912d958d63f69c..9b76d6521139b561a4026cb4b3d87b1a2ca03ded 100644 --- a/images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png +++ b/images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69320362206c72b4f4a03683c1a0ecd3c81ddababaacdce52ef88b7e632c73a3 -size 329481 +oid sha256:edd5f550c3c0511e250396be0124b31c95ec53dc4e7d2cfe1239c6dcfb08fbd6 +size 332690 diff --git 
a/images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png b/images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png index 34ae16f411afcb7d0dc8c10af66408dd7c49e004..24bd684f6cce2868ef1c4fecbf591596b449d76a 100644 --- a/images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png +++ b/images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4319d9744ff525701d3c8ae7561263e9d901cf50b69cc115ecba9b8a90e6bf2a -size 237952 +oid sha256:c18741f1b8efbde33242461588bb614f926242943b51ca25bd02643f704375c8 +size 353585 diff --git a/images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png b/images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png index 0896912570efccb9e14aa603c295100e04af7fd2..31ee82222cf8e366bc063b76399bba2379240fb6 100644 --- a/images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png +++ b/images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02a5195a4e3c049f461fefee927d9e0298cd6f84801c0a7f732510ad1728b904 -size 600331 +oid sha256:80bfacc3b74732e282eb3cc4892e794068d8dc143130b3de7e5b8b519e4c8a3a +size 642167 diff --git a/images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png b/images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png index e69bec9a99afffc5ff188e6da14168b51ca49ea1..af0af59be9b50b5ed227e1e0efdbcf15e39d00d5 100644 --- a/images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png +++ b/images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cc59d57af29203b435beb20fb0828e61420ef2ad47759fe86d0b93da7f11bd1 -size 1293851 +oid sha256:2bac0fff28c0e0e7e1cda3f90d346e756452b2853aea89ef14c14e5a9edfaad2 +size 933583 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png index df53118d5d9fc64368e8d7579c8cc9d8041676c3..286721a6ef2c8ceebdf53528a30a9924c7776cb0 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a322bde80079a6906462c645afc613830d2cb5db0aed447035b56f39e81677d -size 691893 +oid sha256:4d68dc7828b79cad51d9afc10fc843bee506407f91ccc8eb5de10a87aedae931 +size 1058717 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png index b8bf2a24dab05bc64b3cfbade6351272077798c3..b38109342edc2dcc8e5430495fac0c9cf40016a9 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1e008fb214a563d77147c75338d1e43dd75e2835998d584f5da1553fddc7722 -size 1235790 +oid sha256:cf300bd39cb1135938c48c503e76166d90361016049d70dde4fd41f556be06ee +size 1655908 diff --git 
a/images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png index 6f6799730c5a1294b2d931698d29af5f9ff07040..c128a635a6b04b40c3a9db75431d522613b9eb8c 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e23b7cadbfbd7c1e5b8f28f4d8f4e6d66c17363e2bcebe45e3edf468f13f1fe -size 737538 +oid sha256:64cf2b38eeb4e3fa88493e9c2ab187fc31ef95732a6d3818f485f041892edfb8 +size 1022699 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png index 63afa61bf6e133bd22d7a3055b258ec0d3488b8d..f024167867d813de104464a871a2e59d46a4f288 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f9bd2b9a1ecb3592800e5d51ad3ecd62646855fdc69651f19c59cbfb30ad74e -size 924365 +oid sha256:6c1380134b02966c305d6a791d37c687f11895811f0b2599017cbf4bd0c3b6c8 +size 1030727 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png index 46d5c90aeb654b119e72256aa5f2fe7ce32f3d2a..d7cdf221d76eea02466cc0646ef2b89e319695ec 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac44869d78e60d1a87225109f29e01dcbd0e1ee4e4a2e29e9122641712d36ffb -size 885411 +oid sha256:5ed3f78143a59db527e6880021cc4d9d2d07da2ea17c83431b48afd173a4f714 +size 1022983 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png index e37361e4676d00694a744c33d51909d6c6da6f4d..0df040ff4fe19fcb174bb04fb73ae5b99d6b69c5 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:500fa878f37699182287b55a8b56632a0485bb6a31cb1bc252b9858a9078dc82 -size 1528146 +oid sha256:38c8ced6754383a071d2b17982ec22e552dc590604102d04e5d2ff029821da50 +size 1951963 diff --git a/images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png b/images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png index daf735597001fbfbd7d2eb8352fd827ff83906fa..95e0157395f3141c413968ce21033f20cc430734 100644 --- a/images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png +++ b/images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e0f6c6fb74466bb10ff933672a1a5bde79efa0242e0b4b5f1c8efc5afb8acdd -size 1030824 +oid sha256:a01a9a1f98b6adc64f2bbf0201b1fc212add255ee2ff9977620ac17886dd85e4 +size 563458 diff --git 
a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png index 263f7c0eaa466819c3458cf0f49a298838b914d7..c211fb6be9d63645c110300b574afbd1d4fee9dd 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:259fb9ae2ac2dd4bfa6dceee2ed1a5ed29ea8b1d904cc38d158442a523b3167f -size 1074208 +oid sha256:3e29397b4900566261a4ca4ba6c04c86fd8131467d736874c2889956401bc84f +size 923443 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png index a16976aa755a745670837962c5e53855a661df9b..4534b159e6cbe5ac91e9f3d302d5e05e52b67606 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cf55c5f82dd12e5c6e4601dd949c25a04c33557a63d5c35db81e127e3faf28c -size 1078849 +oid sha256:195322d52f6763fe0a3c99aa920103d83bf6dad5ae81f63118c4a1e2d047dba4 +size 780480 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png index 1e777a5c98ce3aee60b322a77f607a1f162ec2f7..e42a2fac301f06763a612eb4a2f2058d0f21a272 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57018cc6cecb23a0a1440656d1a1ed7cedb46f93860788d945c4710013253bd3 -size 289225 +oid sha256:c5ec3b0772830acc09a1fb30d00ec554a6a4430ffe9a99bf7cd987787ded72a0 +size 395202 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png index 888f3b23a69595beb705e8f71bed3da43625b271..64f5ac0c78490b0cf511b8144ca01d78ebf1a949 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f348977f2ade9baaf197b14ff81412773a3c3047edb98949b13678d1110ebfe -size 1057919 +oid sha256:910323431b84c7adf84a144e5aa3fff0ff162e6715a25915e81d47a79cd6709a +size 1279530 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png index cdcb185a330e50ab97baf9582f2e9f03f9bfe9a7..01966ad561fd58115eb43de557dd23ccce1d4838 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0883d861ea006b6b3e8e2eeb3ba76b6cd1462c3a18d919e70dd67556ebd929e0 -size 1070966 +oid sha256:3794202fb7e4047cc4b3121d413e7a52be7e5ee144ee935b7941b06f36a91200 +size 1342305 diff --git 
a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png index 95151efdbf67bbb45a3db9fa2f37af791dc11e60..05b9c68aee1d70bc5f5d068c06571baf71c197aa 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a22feab8245ab627c25737e6659c983adc6203cab32692c77026590bcbc5f6e6 -size 1118160 +oid sha256:a361d17658f8ec7b8acbac7f4e58a5bacd91fef3f7690cedb2af5928daa04461 +size 1248204 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png index e1b166660167d1fb9d79660e23f786bf1ca8d72d..c8a5248088dafe76f31681467ce5081f382cabea 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfbf2084eb27b0c963ca468ebd2b01a83a581ee3550c2f9a61245f7c9166c6f9 -size 1075444 +oid sha256:7024a628dbbadb33a0b5cda91c7e4f20fae015ea8b198b8abadaa5f7f7510b25 +size 1300500 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png index a9326da4728b3f40b99340b5ff1001a59d2b19be..d0ef69b64086075222d335dd9f7cbd3e23630d80 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:794bedd8ed65cab505c473de03d40d110b9bedb264e15bcf55139cf845c1c69a -size 295784 +oid sha256:7085ef350fec1009197dec02fda47122c9553e503f30d15b4e426d352eac475a +size 442834 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png index c379ae5339b7ae901b4ef0153414db621c1e155a..8a251be0879d0d9cc3106eaff12f12964f092112 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84d8d0022daa38c2bc0733c900ec380d161d33a7ab6f016a3c35d762490bcf7a -size 1196583 +oid sha256:b2319c22b26255daef9712921bf039e7a5b4b94270edaa13b64dd03d04abc732 +size 781878 diff --git a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png index 610250d194e07fb5ee271db08638a389d7651ac9..05978f31b94a13fbc6492da309de8d2753b210a0 100644 --- a/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png +++ b/images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31139d7a0b1ff42f37708e6d157ea2903d11956a31611491a8a90bf2649c2155 -size 1061229 +oid sha256:777c289a1b4fc3a5bec694ba1e3c55e02ad425224ec8ee4a41630a30b096ee79 +size 1282421 diff --git 
a/images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png index f416e2ef73f7ecc377e724e5a366a2587441125b..ce28bf43afbb28ccbef1e3c401922f58e4681e3a 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a09e6e64c413da7b330476fd714379bf71ef02f29dda0dfc6db6ac72efe2914 -size 832024 +oid sha256:b2bbbd437c6fdc1eb476fb894ee0c94f48f444f830486188c7173e071bcbecb9 +size 968185 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png index 9ec4ae063f07f411a065f4b838acfa80244871a9..87cc28f45eac3b4cbeceacc61f0815bb30029b4c 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b01c8ae7aaf0971bdd248b22d5c71db6d4edf63df0f473ff4d6bf5c41c188e5 -size 1348384 +oid sha256:8afa73f41ff27394d70cc8f2ac6375037d616c6ce86beefbb65ef1c42e56152b +size 1027291 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png index 926d0fd3c16a86c4a5de4c09ba47e0c99ef91bb8..79bf6cf07099b101a362650e24f9eabbe224cf1b 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a70d6c1dee28f770ddf63cd8ae1b7b6120356d02f829bd8096240376a865e3d2 -size 1225650 +oid sha256:14e2f7394d6d9baed16c13ed8735753bdf341d1c6220acc0ecb2344ad9481326 +size 1268328 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png index 223ddcd6085bc754899cffea933f9f650420e70e..679a2d6a1c96e10749cee191b9ae78105b98c706 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba52d52cfa148841aa0fbe30bb0be8e27f2989444d59280db494849b203f378e -size 1420244 +oid sha256:c04b32df6cad8ecc29f71d3a507298f272537f767640e72369ecb98627b72ab6 +size 653986 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png index 5bc28c29d27105044c7c0bb3ff8d70af5a8ca5e2..aceaa6e714563d73886cce02edb0ff403145db55 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83b53dff9f0ef9d982db93509a08934b07415d71e75f6fe23472b55550d3f853 -size 1609112 +oid sha256:5b3f051a45d3984a35bd6cd9c540a5395e65d1f31dcaae0ddfef2500b7749428 +size 1422190 diff --git 
a/images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png index 6e45625ac359ec42888a3df22803598dcc275872..7cb93b0cb6d3b2d7883cca76314acd29e1c1e879 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11a7b391f619f3e9b1fa79488dea6b2d47307d2163aa82d655ab31e2b5b75235 -size 865184 +oid sha256:1c2cd7c9f661032a54a5d8d7d0d7673f87e7db9f4b26f1899d74a5f9aa313e7c +size 688023 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png index 2093114b4dedb60e905f8780e87be767295e5341..f0ec2921e471450689e979bf72f52d3b8e840e23 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:718f36b851985e6f940467d8ffe0a9ac3a17a4c0b28141fa3361ba19603bfe29 -size 1414810 +oid sha256:520861ed640cade325b0a427ae4d79dedd8e5cb5f19a2b2c9c0f934757a7b37b +size 1341777 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png index c0c84fc85e1ee5ec50e3e7ebd78d94e7e52f91e2..99f50bc149e057b5336baafe2bb3fe1752dcc28b 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce55937106bdc695a9ac659623e0cf7a59ea2c05015c28a9762d0d29fa52ea21 -size 1933329 +oid sha256:6bed8b977f1b9376428cdd109ee9ad91723b770a98d1c7ed9298a775e18475dd +size 1637747 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png index 9479ecf1b75adf4ddc19b1aebe6ddcc2587f1fd3..448c25bebad07724b12d79a05f767b6295955aa5 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8ba41556846000e780d884acda09578c02fcda0189b6432227ceedb499f503a -size 1793888 +oid sha256:01e7c45206697535f0558e60a994cb1158364c086e0c71e697fdf2d67585d27a +size 1012268 diff --git a/images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png index e9429ac0fb75c5b02958a2aad0b4eedb3ba8726e..d2256dcfc7c15dfebbcb0859f68e749030afbb33 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6003540bc7e60c0d87193bae943935ce554cc73af68ead99518791bbf78afb43 -size 1227494 +oid sha256:ea5a1c366d1461e24ae0ca90c691fdf82c630d1d1f75f0cdbef4a32586e3a955 +size 1477724 diff --git 
a/images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png b/images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png index b42050ddb4cc79d547a62c5cb7053a4019d8927a..b08a9291f1e996931622175c820d19b1f4b87c31 100644 --- a/images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png +++ b/images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8b4ce25b8165199a85a700ededc0797ee6cde26eff3e804102826da45ac259d -size 1797083 +oid sha256:4a44d9f7d00502df619f4775855b7cfde0cc99478c5a61c34c755d51997927fe +size 1811741 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png index 70e558ffc1fd4e2c3fb81bf84aeed0153de3a907..f7756d169e30ac7147e7f2c4e5699ed7a446d110 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:052f4e072f0479d7b328c627af4a51b73610fd61af506dc3d3f6f65e59b66f90 -size 1254230 +oid sha256:4a8d6b5715a456195b71fa70a194d60c775175c320bea162dc21cc854f5a87ed +size 1228403 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png index 6fde4181b7ce48313ad9f0b1a55e2a59df87fd3e..bb11192a946d9f63a7c2f934e8c73ad1990b91da 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38f3c89d4d93caa57842bc857ec31de64bcd96f32ede5965ecc39e10bc428c95 -size 883820 +oid sha256:99b7f901872148018936396863fafed2ef2e77b31fa3cf94e65dc86771549646 +size 1255065 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png index fecf49c2c1423bbad8d734cc75cfb9db5c30b5f3..f6c27f8bc94c0c6cac7bf62c66613f782db0ec6e 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59f8175310db01406c1bc82c268ce612b130f0705724273fff87402385111530 -size 759604 +oid sha256:4041725e5c38f195ed30e3348912a12cc7a7c47d9594c36092b78c2e3f4c2cf5 +size 936056 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png index bf9fdaa3a19ef443b889cf1c421cc9397b69e115..6d86d70b31d9c0b86b905b8ad11d66fe038251f0 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4befac26664bcc7ac16081cde21a2ea753de2cb2600120c801a67fc9509b30f2 -size 798675 +oid sha256:a8e924ca31dde4e6c723784cfa140f8084896f1c93f577c7c1b2c84a836daf21 +size 963925 diff --git 
a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png index 4658cc2986cb87bb0c46e3dfd2c91d58df367411..c9d070fe9a1a4001f676d6932df71512d5e5405e 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b54ce2fa3b67e85bb1b0904ee8c7a73a46ea922c98252a0f876ad85acc6584a -size 1124697 +oid sha256:e8186dac7afa0c1dcdc657220abd5f7e9f6ad449af8aea881f25a7018bc69212 +size 828686 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png index 0e6937b738818b750126fd38942ef2c338b56c42..a9e5d121737cfd32f5702eef273126d786bb83d4 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4c124bd310dd3dbcea2cb752f7c934f36a8d44a6cddc6b3faa3bb0c30114fee -size 766545 +oid sha256:521d8f4a957e404bb4c2e0d2b0032fc313b8cbc54ffeba0255550c133d74289b +size 927902 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png index 9df354c6c986224ec0881a076b88a1620b4d31d2..e9e7a76cebfbecd34d05f9f6f02d41a99b714277 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d31219f7832cbfafce006807e232f72e2b9242d2c9eff82ae11f565b03eec0c3 -size 1163292 +oid sha256:aad41a8415b0fd0d2e368cb98cc036a67097e05aba68b966a62743f6647f424d +size 1249648 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png index ec21779f4274677940f9c9c29a98a6230cf99df4..07b4e9b772731ccb4488f7c53bfa3758f8ad5f8c 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9fb0dc0c57ed47b68e278370999ba8a3094b046ebfb894ffb9b9907602fd3e1 -size 815439 +oid sha256:fff4fcfa3cc1f3f14f0f6a6f59a16fddc6ec3271305271326dfa8c0f964cc92c +size 1265448 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png index b0df62de441b0c46f5fb4f5a79f046e5202b0be6..b8546acde6dc8359457ee31a342fe7978e880dcf 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a530b57c3107a919202c4fd79180729cb1edbd5fe8a0e7d5f8282ce5b648451 -size 986869 +oid sha256:5dfb6442a97b90e22ca222e4579c2b2e069957dc5c53f2de5cdc25f8d87402cb +size 1261156 diff --git 
a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png index 4cb6f365dd87ee5043aaf2540bc938e70b0185da..156e35ab90d09ffb3d0141b5163c8487c4556ea4 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8eb9b95438e72ed9034c2f1f1704609935c65b6a1c1523bb71ddf3941de8ef8f -size 797532 +oid sha256:9b8bf688654c7f151af14d29eb4a78e299aa0518f2a7b17f6a8e54b10f96cbb6 +size 814370 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png index 88ac35f37556f4afb2360356ae66d8db04fa8d7e..c8a0031dd797add34e243aec242514bd8f7b47de 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91a65d2bfb27e680a52df182595e716978dd7215d1f4b75f7719760a3602e313 -size 519315 +oid sha256:10fa171fd336cfa08351617d997d09006761f8815c81313e06eed614d3ee2c42 +size 702328 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png index 8460415b9b3fd21b67f345dec93ea056419f03d5..b7143fc87ddef119d0f1a01d475c668884cdf667 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46ca8f38056c7ce894af09ed30a106a3b6878b0b9aadce48c47354d0eeabc65c -size 883111 +oid sha256:d5cc726cfce0c12873befd8074b131b46006564968f2b0d8e34051ed25db861f +size 457330 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png index f6fad2f936fe6e6a267a67c5d56b8b134de407d0..9016cda81cf19c8a757c7f154316b437a6ce80c2 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82a9cc9737c51b2c891ef7c200631895e0dbfde15e867981846beed764596b06 -size 728857 +oid sha256:a76d4a1b92ce777f6de9c3b116d6247b66eced73bef01809ced1d629298df004 +size 789081 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png index 58907618998be58ba2270c746dff051ceeb06ba9..47e4276ba49a932dec63082bdfae84a6600d24eb 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c4571dc9d472a2d1c3381f11e7db44a814c5fc9f830638710274a5f17b297ea -size 532021 +oid sha256:b735b3ca6136f0bb69ac957efd83a1aa8abd3f6e66a57b5dcceb058a7779a27c +size 782566 diff --git 
a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png index 8a470db48ee9d45c5c015909e4edb3407feba114..4cfd3586828a5b53885e9c4200ae284e06906e06 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69465d5c4cb011b4dde3dc37445c1dc2cc0dd08569f1de89a2b7577d3d12a993 -size 398671 +oid sha256:01c256c46321ed24df469bb8916070cbee90684f839637a20c84963bf3c75bbd +size 277256 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png index 3829858f99861bdf5c48b8a245e7c9b54c4012b1..5def4e0051d7cd8761f7b13463c1a7e2c9455a7a 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d71ff8a21c0b8d300a5324898866ed9d8145baf538a4e2af6676dcf98f5166f -size 828319 +oid sha256:e404489da4a00a7d267b3db8f19a326bc8510fda09c8d135ade3eb71f73b20b2 +size 973772 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png index 9002cb7040cf89b25f01dbd1e3d052d114802e43..4ab9c001ab3b534de6ea2632949c1f3b02ac6359 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e2779b0b167be5fa7eb7cf06b31389d87db6f244cbd854e470f1beee23aedb1 -size 885116 +oid sha256:2ed5d407a8c73f871d451c85682f065ab5fdff6d01fa27812b96090e568f429b +size 795386 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png index d8601c68c84e90bf69554989a5db2b1bfe412a2c..7d4ec847c0d32d95ebfd78510476b717011245b1 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbd862c2429353be2eaaedd4e347541afd8cedabb747c163f83c4624e1ad14f1 -size 860166 +oid sha256:21cb2cc3df71b45d8b6f3eba2be31a39f8a9cc65400d639f5d361f5f979666c4 +size 1090506 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png index 3e06e8844ecfdf87429f16070dc561fe4eefca60..10ab7bbef9ee5f68b7678f3b39f87ca97ce8dd4f 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:516ca07de6bf1ee2a6cf46280b3e7c98f532fc416ab420bc6a5dd2da7017783b -size 763734 +oid sha256:bd21f8bc489142cf0daed2e805a7551d7ecccfea78275c17cad51a8b6a67a06d +size 576709 diff --git 
a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png index ecc0abde1b1074cb54f0124268dd0030fe7681e6..3230e6972d799fceb963089eefbb8ca0c27d7bd0 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70b702eaaf0099c25239b25327be93c456c89794e20b76731ba4353787cbf687 -size 753054 +oid sha256:cf6ae78f5b0252096ad89ab4582de1c00a22cc39ebc555224752a624ff640c33 +size 983368 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png index 569f83ef64c8657755190d842cde26d1590c74f7..8af6a3ff394ebb8702059967db743074b489f70a 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ecc702de92a8f13f6c866da432442d57aedb6b79427f369c3afb4fe134e9a977 -size 878396 +oid sha256:9486e0874de1a70914c9df286296fe36a3450922e099ef44ba6300d7b7fef325 +size 1050705 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png index c38d726e365f4b90638d99078ccdf720fffb2900..367aba4ec3c94a5870b7a96e852f8d7f9412c84f 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:618a816fd688807e26cfcf2328ceec7b79068693a440300d4d5247bb06497144 -size 844216 +oid sha256:7e05dcb631acc7f64b1daf48929aed285d11a80b128746161e5b4cd198a127a5 +size 588942 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png index e9a80905bf1d98afa6530e3ba77791abfa2be711..ad9a1aef83768028971aee81d85325ffb33d6647 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:927e5f0ab1eebe1ebd79afe42eac740a90c583141a2ea542864811c033c0d51f -size 1045119 +oid sha256:3031422215c30cad6c36809c81aa05fc556fa935cfc92b87b7004445df94c998 +size 1025139 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png index 925ca794a44eb52031b2ed2ddef287cb65424148..e20b9ff2c1cc987a7071e7aff2b7e7f18bcf1732 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3dfa9b41cb91f867a21313925335922e3999612c76852d54a09cebc8c48ce45d -size 794665 +oid sha256:5cf54617302139cca20914436612ed9c092aa99a256f9e62fa88e7b22542a2a3 +size 1095751 diff --git 
a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png index 663c00a9e342b1459291606531b13afac4410c5a..ad53c50b5fa1932d50b2d946f8e02ee9a7f000e6 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a31dd49e3a6f467de5bded6960b88f679df440416137b1ab41eed1536416340 -size 434660 +oid sha256:95db5022e9cc8a793b8a06cccea5a5e603ce2ad133d1bdf4ef99d6051c7ab517 +size 573551 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png index f2ed6534007299277fc0a4491b33575e4031acd8..c73cd3cd2e279f8f15b48b66a2948b754a846106 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f2e44a8d2f224ad61bc7e9274f9aa4c01ed6b454dc3b90299490fdc08723bae -size 863697 +oid sha256:31b7f748eff2fec558430ce023819e16cbac5c03ddfa52f875e0043233c98736 +size 671674 diff --git a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png index 85eff01a238071e885b5d40576d3a7b4c1e09aca..a13e4fad2eacab0b40f6e9ebf33780cf211cb35c 100644 --- a/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png +++ b/images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cb2f6a57df6578a98ceea331ab13085ebebc592a0a62e69a1a4f006b847ade7 -size 419596 +oid sha256:0bf64606044fba4f43e7f2d4b1184dece87684a27181a6be4fc928d30f8f607e +size 590967 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png index a39c7196b3fd95d0d8cb96db18369d0cc0054e6e..40eb2bf4c329c9ef24c64f9372d6a8a1a06d2511 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8be382b677c91d31b82c7fb6fde5742d07931fc59bff3febfd3f38ada3007f34 -size 813459 +oid sha256:275ecb78ff007fbfcc1b3c71121d03c51f8b723fe9428a4f66109400f8cf15c7 +size 1437011 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png index e09319c6ff9e2416c2c3f43856591d175f83b977..1ff9e204728cea2d22ba009e57e01afcb5600373 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d27e1a374d3e2b96d04fa46e6a8d4d1aabe1bb607146b9febe4091b8dcbf9be7 -size 603910 +oid sha256:94284e32d6f2e109973a486bf425062fd1106c5a9a3eaad9fef3483ace37f802 +size 1282420 diff --git 
a/images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png index a4346672a63eeeb93aee93050e120e2fda96fc4c..b1955620936a1038e2a237b13437f61c877e68e6 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d11a246fd35bc083015dae0774b3c8d08d9dcf2d1accf147b1d4017a8d601d6 -size 766181 +oid sha256:4ef1086cfd324e7053408dd3f5d2ad9019d0b0d7b1500d10be5266836fdfff30 +size 1402277 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png index 4c72ca8dff69f8067ce100dc9565c7b229537453..5d7a847a0ed45e48d844b256f4f84c3c12f42019 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c657ef5561269d8e5387704a8603f50b62ba9685531ef5bd19087290f15c92be -size 740249 +oid sha256:36c656268fe3195c334c1b8852ba5356a67b437c8ea677470dc08452f724de18 +size 1418435 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png index f210fbbb9744980ac61298ca4049aaeda41dfd82..1b88fb4abd8ad8f5bc6098f75ea095bdb9320273 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71d604c449769c0fab260fdaf812eeebea207cdc6b21f39ac35c789e7b5ae075 -size 742152 +oid sha256:0b74a44bec8e2913ede29bdd204f2543bc9b7e98990a0c98c7967789bc0392f2 +size 1390331 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png index 42eac1482f962db8d037c3329827f2521bee7f89..1f73cc7378be07294b206b9ec170ccc33f497514 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82d81b42538aa1f9b9d15202165b52b34f854828a3288bb0138a7c3b5c91875b -size 842941 +oid sha256:4a0962322ae029ae381d39dc57a2d019932b7b6837c3b49b6496452984c892df +size 1234563 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png index 5c96723152748e7d1a7a2ef624d6c9ecb3646639..ccf065cdf82a0d7b77f5fa62370eb551fef63dc7 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ab6d084900d717c8bc221c8fcb8f0f7b4d447953c311ab860fec26152bf4e71 -size 599952 +oid sha256:0202d94d90624a5479b274a1f827fc0ba9241864bc9844df9e4ede465d0f0844 +size 654808 diff --git 
a/images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png index 4bb15854acbfe2e828ed5a9dcb11de95d7de10b8..3a4781e93157c572cea38fcb1945eca547a3cafa 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f73ee83bb10134532e79262ec681cea7f11021c5f634403d9a7ac878f349143e -size 736683 +oid sha256:7e74e419615c63acb1aca37951146153a9394257eb343a05cd8171b7e0ac31ce +size 1325610 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png index 3dccba75f822ece7a67b4e998e18f4226e72a7b3..db9fa22b2c6c4b5cd34d1b64f1967d8967a769ae 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:988d4164fef5ea38f6faf8cacf75834b14720a2677a47d2f4dc040abb84e2c30 -size 737957 +oid sha256:a1045a2e54c5bb2e489f037d771bef9eb8273d6fa2ae5cf0a01cc8239ea13f8d +size 1221945 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png index 609a740f334878360e30d0ffac9dbc26eda6da83..9e7025d1c57cc1d218077fcd0257a41628f963fa 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fb43ae42c519cff4af9eea3b28954147c8e53d49b2dd3f17527855629345f71 -size 749060 +oid sha256:6c8a881748dabb1f52e7af935439e81ab748edab2ce99401b406a8fbd4465c8f +size 1463805 diff --git a/images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png b/images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png index 8a459379f1051d73587b819191fb311883e4af68..720f2419d21e6c90e8601330303f24a7270a6c60 100644 --- a/images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png +++ b/images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c9acc18560ccdc9b73dd64fff61e3fc555c961a58e8b4ba738fe7d6ba7069a3 -size 730059 +oid sha256:551e83fa3c50c031ea90b7312682bb62c587f2f4e9327c49a5380a1a763f2d50 +size 1408666 diff --git a/images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png b/images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png index 2591dd1d67dce48639995943fe87442d9bcf7df0..d280e104d26e9946d121898c28faeb422940330c 100644 --- a/images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png +++ b/images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1e23b986e2026466db616d5133332fd2df5f473b89a18f7a3526e623dd0d4fb -size 955323 +oid sha256:d4c9443cb70e940b0d0548c3210ea656248e49cdea9bc0084abf9af3c40050b1 +size 234193 diff --git 
a/images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png b/images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png index 87ed31e0c608901001ee1069fdb421054a76452f..28b19d9b016a04fa7bd5330d64c00d8967610ce8 100644 --- a/images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png +++ b/images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:229be6c7be88907389ef381153f140f7f3380ab78e5fc1a42f0b6d579cb4d3c6 -size 1578638 +oid sha256:cadfdea8813b40f4f69db9eb12cc4cf8b5776a440d444ee6cac5274f2a307157 +size 948207 diff --git a/images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png b/images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png index d408497aafd08cd0a78e9b3792ea96b8d68b2766..069f4555dc067aa9db69c3a3ba4ad66a4c4bdc99 100644 --- a/images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png +++ b/images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03bf047dbbf706ac9c1b0d087fc159a39c5b1c2dbd4ad072217b0ad12b141a8f -size 1308123 +oid sha256:8b5eee621d66ebfc57d92d527fae0227b64d3ceacff2f72ab708df26093ff6cc +size 1376088 diff --git a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png index b543a9fbd6501171a119f4eebb610d5ab1b134f7..280b36a79bf225f028f9ea0415310e1d9ed958cc 100644 --- a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png +++ b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7789b02fb6148dceea5f3e01b1fd8ec283068408d445f1d20ad0e3455015384a -size 1616958 +oid sha256:587fc0479cc1ca6861b5eeef744758d1212dc08dfeb81d782d4f5f968f784197 +size 831792 diff --git a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png index 1c89b0c43023dde7982ac7d277fbd48e2d4c68ee..a236fb0bdcdd5b2750af7f93c2bc24d6f1b1ccff 100644 --- a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png +++ b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fcc90ac6ca2864bf9ff86dacfcf07d8f275e700ce6e6fd9f824c37b9ebca0cc -size 493955 +oid sha256:f90ce12b0d88c0b535130b0fef911be46dfc6e8f302b7e08104687875be6cbbf +size 558235 diff --git a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png index 1b1bd3b81f0e6c82f1b3431e85c7274dc775dfbb..2738f0a0f8177d583f0872aa1d0934dc8203a11c 100644 --- a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png +++ b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6aa0c7ab83e46dfee0754e27d7e8257d111f0256b29ed055b7f0664b1b8d9617 -size 1259781 +oid sha256:de5d97153bebd85a2062fb8ce1562da99b3b0258c7af6ff9c083a22819292376 +size 1090784 diff --git 
a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png index f9216ecc1fe3655623a98738646fdf42bb7ed30c..a9fa01977bd5574a227d8549da9a869a98bd5e48 100644 --- a/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png +++ b/images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8babea7ed4edf04fa9c15fb39d144709d69f5735a32af86d1390ebc01ee0622 -size 2497388 +oid sha256:0293d64a65b62c0320f5cc71e52ec31a8f1851e910b7a345c39f218606a4ce88 +size 1921136 diff --git a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png index 9cf4b7f7d48da1e5a3f59a599d54dd4bf99a69ff..3e6a44a079d590deea78ef429e7dac6111a5952d 100644 --- a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png +++ b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2922d02c018b1c1e27eae41207a33f66411adeb49beac8cd8c02aa778b22788e -size 507213 +oid sha256:a46440a8479dd9c98b049ce9d7b243d174a9fe41314f71e16c75e9bfaeffd6c7 +size 526007 diff --git a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png index 5db7d835aaec38fa4b7c9dae6b5fcb84993cbed6..ca53c05c39e4514fe1bad57ab4ed54994a217167 100644 --- a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png +++ b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db649e7310e2cf638c53bfe3940c4ff892b2739fee889bbce8c5e6c8bfd100d5 -size 846693 +oid sha256:bf5e883f412abe121e0ef87ce01c377b7f936acc57511f88b9a02bd4e4b060c2 +size 871346 diff --git a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png index edfa434c91b1516a3afbd57793388b4ff49037de..d1889564628204386f17420ec366baf357fd2e63 100644 --- a/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png +++ b/images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5eb058b5612499ccf781bfa5f7b5fc06f4292aea715fe2619dfb227532d165c -size 546266 +oid sha256:22899aeec659a8edc077ca3c955bd90ac7ed207b654b3108061d4269eae53a9c +size 1193548 diff --git a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png index eb439be953bc1e936a84bfd137f2d1bce0014df3..2e0d1a759be14e2b332d8a251b5f9418a7f10049 100644 --- a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png +++ b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac113ecf51133c23af55b49413bc05d8b89f7596ce1369f55cafaa21a7f1978e -size 813724 +oid sha256:7a28778d6d719a926640f575535b06cff47cc4ebb457d80f0e731726b23d5005 +size 798476 diff --git 
a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png index 00f98900a61f24138c0e2f2bf4df2dc30df6079d..faf6a439327a6349c10e17fec27c46b8b7e89995 100644 --- a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png +++ b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3e47106cb83567e9a56a42ac98b05b90e4fab0c312e3c14688b18d0b32596c6 -size 482031 +oid sha256:8940c80dcbe865f8616ceefeaa3d8d9f89c1776851d510cd3dc98e429a26aa81 +size 497821 diff --git a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png index 60475dcea5a71388822d952437ef327a706c7fd6..28b0d747d450a94c922c7a06a809431a998ab85b 100644 --- a/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png +++ b/images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bc6ec64265640c02f03f163fea2957004b281c647a0cfaed65ea77981eebdf8 -size 478882 +oid sha256:49059cbe8675fdcd60b1fab426e60bef7d7ca1ce7ab266f670df352aafae07d9 +size 296580 diff --git a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png index c7eca7f5fcba6570105631c1f8c070ef87578138..b91960f7089265903a4f7f937cc82768684f41f3 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cac1d1bc2454a92a25620be0af46ea15177ada9ec7ebfd93e86a2b1a215cd1f -size 1164294 +oid sha256:2103c85bd4a61692ee540077bcaaf3c88f668984a272378e2c0cb3837fa1ef40 +size 1632459 diff --git a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png index dc6e79bc565124d100b3f87ee01b3482e727b76a..0577f557150d9366b4baa277fd98869fff7d328e 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6f6fc857140f91b94d49fd9b26dcda92d10c21098327e49beee67da56cb4c9d -size 603749 +oid sha256:384b067e1a92fc77fa60fd54cb02f0c4a79a688c15b2a0d91b24dec571073d7f +size 909881 diff --git a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png index c7751f143b5a84740917c648929b8ea7e8e02633..5f1bf66fdd9ea523e4f62035d41e100058d0f3e9 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3136b99abb6bfd9c1b0423238a6cd956fe63ef011c45872a72515310f0a49dbe -size 604489 +oid sha256:24515c73107185099c77f03edf4028d0a41ab81b29f88b7df8500d07a07015d0 +size 945746 diff --git 
a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png index 2d94f23f042e45874585d03bdacfb9c8479fb54b..af6e00e8cf2fdd4d8d062e2631af14cd5235063a 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfa8a607799294cce0303368e7e1bf732080005c1b591d57d7cd941941db2ac0 -size 1650094 +oid sha256:0a8a0fa654a6ee461d54168c3eb4c445738ee622df787ecdc9cbe653516ff3af +size 1294611 diff --git a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png index b87c040dafe668a0b7b3a13476991a746eec11b0..542d99a5b1b2ff447522afe553072db5108aed54 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b540e29182c19238ed201133e0a41fa5ce48711e72121dab629a13c472de02f -size 611293 +oid sha256:13954dc121d4b199862cfca4364fe132995348a69e828bc4690a6d5320b709b2 +size 630964 diff --git a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png index 2e6b7922f9e36de203a9f3ced22985172fb96b24..f4c443ebc335de6eb542fac3b34d2fb101c011da 100644 --- a/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png +++ b/images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:424ec988469490af023852cfe3f919540be3bb0b067e7c25e2ae6f8af1dfbebd -size 976490 +oid sha256:91d45e53ef150f3e214a17937e9c369abf852560a70aa318970598e95e5dc783 +size 743251 diff --git a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png index 15fe5e913d1c45df0afb82329dd2eacca74f241a..cba8af6ea76f12e7d1cb157f324d4f33d4cc6d1a 100644 --- a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png +++ b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e3294a508bcc585a24d29837144b6e944dd8110c2568b05bd55ce78c0fa9efb -size 2067205 +oid sha256:58fa08c0de2ed7fb8f9c53f0690468b10774fec0bd1dafb733f9f32e16747363 +size 2075535 diff --git a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png index c1cc5c508bb784bad2312e398af4b791e791ec43..5b49cb3107fe9b2587d2d4ac32a68eace48846f1 100644 --- a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png +++ b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57d4c1c6af3a922e782772a457d5d5eb0dd615adadda0867021884b023126621 -size 899681 +oid sha256:78fc5008c35504cdb4aebb715146902ce985776b74ef669ff357abc651abac29 +size 1511997 diff --git 
a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png index e011371524f64ca861cbd968ba1b89b5138c5b6f..4ed71aecb0be0d7e2259a3300e212bb5a43e650d 100644 --- a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png +++ b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:601ad060a0cd426a55c0a3b510ec781ee7189c954f0efacb8b82fd9419669cab -size 1432230 +oid sha256:905c8a3f1be34315469a62878c46263e41ae7297b5aada9bb2b807808c493885 +size 2285346 diff --git a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png index 03ee1383c6d321009fa2168a7486df16b4ba5b28..283dd4161ff433ab37a7f1600b794cb1371762ac 100644 --- a/images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png +++ b/images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bffeafb0bc052247c12e89a7247eb1c41d4e462f9630257cd41b56b8127a7c87 -size 1077533 +oid sha256:1ed48ec2505e925d46ee3164f3721615c25dd0d41b4ed63e632849886ff2ce0f +size 2918926 diff --git a/images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png index 681f5c2e874498ae9f8387368f03569d88f910ce..2bab2be0b7b2320d605d8fd1ef8b2138752146ce 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc25e0834c9bc9c9c960f7c6a36d97702ca592622160e1235bacec2ca123a977 -size 383731 +oid sha256:5c594098742292f0a6559008dbfefc5873c3aa65073ddd1cc91098a8ce1b5a38 +size 364371 diff --git a/images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png index 8b9cebd038759fabe4ef2113248908ee934dfe95..58074576abb74bf595492c78e4e75ad532bbea33 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a20fb11e3db3a8ba9e75d81793c30b9d24ca57c1e2d8971bef4fa888d4587a37 -size 281424 +oid sha256:2b8725490fd2bb70b7d14d38d2d71eb5cd694904c1946098617b4c08f6c37bbe +size 624561 diff --git a/images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png index cd0d71cb0d9e198b6a696f724c518a98d554f32e..b67fb8bcb3c10b6f77c7fb8cc7ab1261ff4e5150 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b57a4331ecd345af62d100f9c3c803474785b3539fe5d5d4abd0bad536dacdc -size 404944 +oid sha256:62889eac9f3cd089cd544b90b00e8cf021e05bd6ffffffacf0596728d871a1ed +size 365531 diff --git 
a/images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png index 9e322b302e3c8b71712918a8e40377da01549e42..9832cc3180c46afe620381b2b3598209afc0baab 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e699e9c44f3b9abdee0eff1c3ea183d20ecdc8f6f83d9501ee604a4e29bf6e9 -size 310984 +oid sha256:369cd874aeae53a6630da87a43c4e2760b972cda24c8d0dacd743562fdc67c94 +size 312459 diff --git a/images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png index 7d127bcb83507c9d578af9af0621c94b11ebc1b6..2ef8c31130a8943188515a2d8205330b1b2d13ce 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:608b8fe3057ac4300e0bf55453414fb67d9dcec58945f6dcc707f4ea6c9fd606 -size 397899 +oid sha256:3bbcd8c0157d6dfce2a53b7f5df3f991ba0a647b00e94c30957ecb4c3b9b16fd +size 347992 diff --git a/images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png b/images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png index 315b0f8b28bf06696602cd200d9db8aa9380d490..9c94d1f456760b16df025fea2c5f0b0d6f2e5eb5 100644 --- a/images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png +++ b/images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db8e425e94989573087bd34e7ddcc6337f724fe7a09e9446995442a15661b155 -size 410099 +oid sha256:dd3344a668df3a9022bf63dbb52a8013e7109ff5a74c8ab34cfa75a80e326efb +size 367299 diff --git a/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png b/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png index a08ae4cd2dfde4618d6ede13c352fde4c0d13b5c..b48ab6c9cf64241d70cb87a1f29d0ed580ba3ff2 100644 --- a/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png +++ b/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf482c7ef44dc922f2d2a5429b1fdd9fddab12e8579f2b98dab743bd3ded4494 -size 872982 +oid sha256:2dc6af7e8d029e598850d65c53289f0a0c94446e3c1662a10fbb41b690ae3c05 +size 1107651 diff --git a/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png b/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png index 4a17952a3d9abbed41b099f7ab7f3d2dc365f49e..bd050c2a023a3c7a1c67678e963f0c74239ab4e3 100644 --- a/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png +++ b/images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d860350d8d5909ba1cf845dee92221425bc020e9b381d5b1bae8b3fac9637560 -size 748590 +oid sha256:0046f931c5d921b0e783959a1d1ee26cf436f9e6616321732e59d94a3bd469a6 +size 960538 diff --git 
a/images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png b/images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png index 46ffaec094a09cc956770a8a309fc840b27dff1e..397dc8fdd9b3d9d7331661d26fbbe33b43b69fcb 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0dd0232102391ce7fe9bc97bd08241e16d59a3e66d90a5db205b251ac857a0f8 -size 838150 +oid sha256:a5057f3a3c8b6eea64d4a92ce7bb7841f3068acc8332c0a6607d406ddc3639c1 +size 1115943 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png b/images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png index a64541716dad3836615f3a8446eab1893bff630f..8a9bf6bc518b7180c4e9a9d036165bc08d9cc4a1 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12b092d014561594e0339fa862148df225ae691112fb4807927cc252b2418410 -size 1037047 +oid sha256:7068ae99b032930762807c23ddd1e3a92a5ed9c636c0389dce7ad340b60b5601 +size 852471 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png b/images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png index de3639ffdd64f3cab811cc22e4ebe85067b90097..ec88a1aa014070e46ae321e33205ef14310b82fa 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c35e7ca3a15bb26b66a27ec4c127ed2671da21c15b18316bc943436007e5da9e -size 2596311 +oid sha256:edb52bc02812c6e99bdf71a47c27b4a14e08a124249eaaba647572a8fbc86981 +size 1323716 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png b/images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png index 2d870a590e1c8e276ce794c193e127f65da8cd03..6a9f876e167a16a2c3b4e33268b770de325078c1 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74aea987394ea459b72b017db9296315a4318901c506d485af3c415d5f281346 -size 970578 +oid sha256:25d080a0f52c0d7cd2a9e2e0186f51d89e40383e16cdd3bc3217dda68dfaeea5 +size 1143562 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png b/images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png index dfcc0ee39063285758fd0093a4b72b0c7a687176..19859f0900e27659de6966fb765d8e5abc123f0a 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a5c7c580c8affa4e0698b398f2168bd768b0a42524d35f9503205a30b7a2f2b -size 1025351 +oid sha256:de5626c2a25f0d4849a0a2c9a65abb7343b6b3c8f952dced039f2f23f887a27d +size 625571 diff --git 
a/images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png b/images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png index e3e1fe3b5ca46ca94c45ffb0cdbb11b4ea23ea79..33785a96969746926ffa75ffe2a3190e7cf58244 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d1506f11584299ff612b072251197738155ff468b0f195cade23edf23d153e4 -size 893312 +oid sha256:1edc63d675d5b8c79ab423c85c4f6909eccadc585fc3a964b16831cfb1a16145 +size 939304 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png b/images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png index 11f0db1ef3c6e3b96a61133aefa22b5f995b0ae2..22833227dbb712e7ec90a36e398122019e84d4c3 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4f78e14dfcb05ff536f575b14213edd636cf34315f0e68456afecb2f97f2488 -size 938725 +oid sha256:0b73873a9a999805de6d4ac2ebb7f8bad649194b3902b4ba0cc58888ec2dbb55 +size 1370483 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png b/images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png index f044e9205c3cdaf0c18cc2df45c1eec514920d38..0312dc751edb492855185d55752bb6b0d532863f 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0c95eb3df9427348ac3ac862a312b59f3917b03c421af01955eb60154117b08 -size 870435 +oid sha256:ac9188fafc661a5c358eaaca39b1dc88e0c6153f613be7ccab9b8bc9fff86a3b +size 1120466 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png b/images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png index 71dc096ecbb45e7294ff7013c13291ee0678f6d8..a8aa7ce19074ff94df4f086cdddaa07a04d83f7c 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:407f4cfe658e3df4a5a621d14cf7623f158a8655ad8272a9abd7247c62abde34 -size 895281 +oid sha256:edead985cdc24644ddb3ad3a584ec845ec7dbe7ec3b6fc87856a78828ef8e32e +size 722251 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png b/images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png index b6bd7106f5b8434e3ca676b90822b2f7f8a06c0f..931574c5c67976c528c0483ce470686a55b68978 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d07e2ddbe60c2e6d062b011c55b1e099b22da1c752a08029e66af37beb62994 -size 933627 +oid sha256:d67669f96af1746401c3ac96d156e7b428aaf7bf0211c0c5bf0388f434a96886 +size 909560 diff --git 
a/images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png b/images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png index ad3e908f29b801a1480d0ed3ce59185f72cc0a43..ad7436098e4868b32da26d3a34bc8aa89f0d86db 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8761a8d5537e405bd9ae4b5649ef7b932e9eb26d0175bfb30fd93dffd954b9a8 -size 877070 +oid sha256:6afcb3076c31cccb569d2491e5ce8c929bc5d6e93a613455eced453ec8ad50f1 +size 998924 diff --git a/images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png b/images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png index 32d98f04aaece5e7173009e35ef264827071d1c2..32f8f17034a789ad0984016f43d9bdd130865032 100644 --- a/images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png +++ b/images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51249492e54bf7f7d1b4dd21fafe224d50b1e27470c9ddcf01a0fa3256cf7cf5 -size 1133312 +oid sha256:0b0e07cbe440fd8a2aa4d27825c300c7adb841284083b29175f821739ab8ae2b +size 1548621 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png index 41398e1150e15d7f19a782327365616a3cdef007..98a4954d131d9535baacf8db7a14ac598b233165 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80bc9e65a30c915b3c5a45263fca099517b6519b35f97e1c193a3585f38c79c1 -size 787727 +oid sha256:1461ef84cfa27b9a5d078805055cf82c9b86429394a4f2035a887cd7dc6c4b1f +size 284918 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png index 0d8938719f9cf61729df6c2ce9d07da87ed1f1fe..aaf5909894ee597f018330083fc9926ecc5b8e19 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a66f6e01dc20bc8068166dfe7374088766073c64e9ddc55eadb4c719d9d51209 -size 430564 +oid sha256:f0378068884ec6e5c08b434a001761d669f11bebaf3090621f405006ecd2e44c +size 1023741 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png index 6e61fab38e39f7c6e0b5507f01a71f9e37a2cd1d..c119b9dcf931e2b56fdeaacc49731a0d1716308f 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee7c2b63fafd48417b4ea12853090b40b3b1761a0a657b4982a9a32c05686b38 -size 787802 +oid sha256:d7cc7d9864170b6d5ba5c4f19a9aaa84ac46a4cbd548856f1a437cf1880b7601 +size 787689 diff --git 
a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png index 4ebf4ffa7531bf6ccc3054177dba39d3f4ab336d..8643c661e552249ad7f9d68ef44494ea917d64bf 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba34411df548659e6ab386c46a7f2c1a61f7347aba15dafdf9e280f120cd18ed -size 1639713 +oid sha256:0069197746c14b141c6f472704f9a26924250f83cc999f46c5b7f287b6e6a9af +size 355677 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png index 8128544df2e653da144a512d9bf39ffd799c9fc3..f6c19a3d1b8372d18ede162088197644683694fb 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d0d3845db155dd2b4dd40fbe628a10fe0aae2640b222703c5a8cc3be689ec7c -size 1449574 +oid sha256:0db4eafc1c73466a3dab0d284a9ab7b2e9c2c0924d4265187fbf066b25699856 +size 513202 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png index 0e7410ec12c72298f6096cb3fdbea70ed2bcbb96..f7647e83bdd6bd401f038a846714e28c072e7434 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68fe62fa413aecf8a0b663aed08750d747a48778a6ef5a937fdea92c9cdd213c -size 819900 +oid sha256:9a1a5f6857be7050ccd8f864804666571c854274ac46a9b6652b62b81f4cbc70 +size 47910 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png index 5c24485cc942fe23be08d5fbee727a69c166ea23..d57b37d1dee8c5eb0aa16dd7b14f8269151d9308 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3d3295a0240a7b938e12ec99c9e3c1d477b2ee4b7219ac64ee42344cf5d9457 -size 485274 +oid sha256:6e9d791932dcf53d013191046cae55e5af1fc0d73a6b1e4f25e58d6b48cfcd32 +size 394846 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png index ad43e4a5bbf8359904e13f559fb7b5f4fd75cbfb..ee6fa113f061fb9aff7308256dd890a464132418 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:128bd262e889d810cf0a691d3b0aac1368fea7e57f08e25d970ccf3e87313ed5 -size 787844 +oid sha256:f26cde17d3e070f2f9696f728600807f593a9c462af8caea3b27caec70c0d592 +size 787297 diff --git 
a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png index 4672995f1637c7c13bf3c3d8122feaa04c1a5198..914a787216bca8729d1fa6a1c95d610f538bfe69 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcd0637949bce98761ab99c94074b2176f83f4bff9102682215947085856dc92 -size 787687 +oid sha256:aa8fbb0516d5034520a893cf2cd368c8753f3b9c9eb8d7c4cc24abb50dfad509 +size 802899 diff --git a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png index 9a1c6503509858f9c3b9b334982af17896e8dad3..f59d284c3145443b7be5ccb858480a1679ca5470 100644 --- a/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png +++ b/images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e267a9a4324c536fe4a068ec6d2aab73b0f9e22d622a288269c4bf9ab1edf424 -size 486492 +oid sha256:4a1fa386c0f94bbf3d8913ef16fb107d2e759ab392a83557cc941cdfa6034e90 +size 1104005 diff --git a/images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png index 9c2f2fb100463231ba3ebaeffe890c2a533bba85..fe0cba0cf591b96788f40ae6fc251c245b589fc6 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d47136fcf25c1fa70f845437ba1f60fa57d4a1f00ef75ce41565c330970ec0e -size 1396565 +oid sha256:50ae41da9c91a1b6a80f0a47271e978017e368017bee4b2b2b957f563401dc23 +size 1308549 diff --git a/images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png index cf450e60b15371e4cc706423734cc076414ada41..c1fb32c857e1bd0ea0a8188cac7013ce7bd3666a 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:571f4c2e01482229815b46f87d88e24d67c91ee48e9b3514021ba3a36b536d1c -size 1803190 +oid sha256:d0407de9d711013b4d3d4f26874e8b4d539fdade74cc329b4bbda0c7a900cd46 +size 1931953 diff --git a/images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png index 25c94563c05aff6350b9c22a694cb2d51f95692f..e482dd82c46bb23fa6e05e12c8789185ad77b7b0 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f48183a40c3373964c6c96981897cf493b1451b7b80ae1d8302106a33aa06c01 -size 1817459 +oid sha256:8d1ea0975f5baaa7232054eb0f2161b84067e4ce44832897805fea5dd95aa234 +size 3069331 diff --git 
a/images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png index 3f36021f9da5ca48b04076a54ffd422b30a1c190..075fcad93e5b878e7547a7964db2b8f4cb29be2d 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7280a21108ce587f5755455c89e6cbb4a001b4f781fee03ff04a061193c193d -size 1331626 +oid sha256:f79f864acff6cf5f7e48830fe69fa37f8501b371a6443a9d29eff4f980fea7f2 +size 1709297 diff --git a/images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png index c2a747e00cd332e8375b171a5a0dbd99a05cdc62..5ed270828695db156e906440f63e92463abd0594 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e31df76c1d2e342b1212fabd76bc7a469f5873ee0628d921e3a612d6f77f24a -size 1333406 +oid sha256:abfa47693062c52d33a28f65db67ea39afa00e48d6b37f62bc3ac412a405d407 +size 2733160 diff --git a/images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png b/images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png index 293e4ec464243527d73e907dc6514890990670a1..701199fd0a84e85fa59584a93e48699bba65744a 100644 --- a/images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png +++ b/images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50f7773c426e48f519946f5f8da3d5e2288e72c9df3a4f7b7ca58461eb9a3cea -size 1826740 +oid sha256:c46585c5a3ce75719730780abf4c25eeb0df7d73ed38f08d27f33cf7a530d84d +size 1844655 diff --git a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png index 648a70b0e681e8d0b45f94a2758740112779dd32..2463fdeb4ab3d950f7b44ed70c443d2bf784d5e7 100644 --- a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png +++ b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a311dad88f4ebe2324a5fe45b0c528c8aa0afdaf560083e72109f81f64412a80 -size 1417214 +oid sha256:16ebee5993f6d9018641b87b4dfc53196f95d37bfe880bc784373b651cbfc496 +size 440855 diff --git a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png index e8e513643a078a9efab68c6596f27ab8cef21613..5e9b0fd253e701cb14e83179928dde296360ca60 100644 --- a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png +++ b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7b02d50c3d121967b2245fa2d5c35bf329914f25985b1bd76b84e2ecc0eaddf -size 2208261 +oid sha256:54742cf0ba4232e285d3386cc28df1419a92c480d2df8b14f4440c1e33c05d67 +size 1176282 diff --git 
a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png index 9f07bf66314974c699e3272901ea15fe558a18f6..5d6146082ff2c562e709f0ccbb1cc591b4e35b2b 100644 --- a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png +++ b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69f430741b38656d579c6e9ec81292e6a2cc2cfb4e5aaa1e53737d8c9cc3f17c -size 1326318 +oid sha256:e78f7e8513b651b77d42366ad7b0633bca9d7fad2322343fd5a462d5ee3cef91 +size 894577 diff --git a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png index f0631a1bc35290a842235e16c16a6b4e82088528..492c170e9aef392452c6fcaf1e70840b81173128 100644 --- a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png +++ b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5d6664bcfccf4a280e4f93e8d984caaf2c8fed70768429d5192c260e37a2b57 -size 1420647 +oid sha256:d05a4244e8558e7e7d803ab916bc6ac4afa63dd959301cb1b6fd9f4d78c80e92 +size 1138498 diff --git a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png index 6fb28ed790979d716aba0a5f280f29533a94cd5a..54dd2ed8f8bad41cb035df8a38821a563aa0faa8 100644 --- a/images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png +++ b/images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdc773703850fe5748abe1a41ee3a09ef5897ff6c50b168526c0d1a7d598a005 -size 342656 +oid sha256:479bf5b80d6591669cf95388aaef668337329358d3bd9d120b64899da4f674c4 +size 217470 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png index a31b89663e2eae9b470a9f89e757f1cc37998e82..6f8ccbb96cb0c71c0eb9d2b1c3bb057d376a591d 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed9f69c8f945a9425467e1f34a605d172b556b20cb89b1b2f7777325a32e26d1 -size 1351390 +oid sha256:d6f08cbe857b1b48463181b4dc05e3d36a010cb76a20d42552e23d314da4c546 +size 2580684 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png index dac83947898fbcd3fcad890240deef6cee3d1bdd..7aeeb4c1fd67b48095fa5e7531c27f43ba6ff91a 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb7901b4a0afcead3de4a37bfc3bca3b03bc8f959250814d437ef6fb07a8116d -size 1525739 +oid sha256:ad993972605042fa1255d076e7628f63195a5577153b4b164312d399ed607f41 +size 2546817 diff --git 
a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png index 6b16983d879fa6f31a439987269f22d124236296..eab784d7a909a20efcf5bdfcc4f7c9b3a1dd4388 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2b92f891adc675cb6541337112f25099aab1663a91dc41671ab542b605f774f -size 1774237 +oid sha256:7e23b2a13350cc6f0a744f2273954fd446f7607201a8f3e161055ef254b58b0b +size 1599153 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png index 8d4865ad3b7b972e09e29a480ce5044aa1eaa9bb..0ffe9e7d964a48b659876fdd25be92aa7954acd6 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21114c08dae235c5ed9d4ec9e5557714438e75129b3d2da7a5528fa22bcd0d49 -size 1489920 +oid sha256:16a03d17047fcb5b370522f83cd876d20cd889fd99f2829fda08690a3b6e9d66 +size 2719717 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png index fff6a21ed5b6cc565afb5f143a6ab5fc8c46eecc..e5dba58e18785b9e3180d917b34442e2ca18a3de 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae8eac0f93feb78dc01709fad699cbf21b706f377fa92a2c6f4a2963d9d02c78 -size 1221551 +oid sha256:4586548686c9dbae7455345ee3adddce5b6b9e358d060b2a755acc57ea52e9d8 +size 2329481 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png index 553a0e24e2d20cdd2d0048947f68ac0e58965793..a309b6a90bea9edfef862cb8b07b1b5894454a14 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de01f145cc0e2bb64985f5926d2382d27948a204140bfab07dd5bca1f7db8326 -size 1397459 +oid sha256:1f8c600a3aee01d8d91efd380dc938a6e808f9b1cc77f915c28cd600436f47e2 +size 2416489 diff --git a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png index 0c312a47ad6de3c53907866c857c4ff12d29618f..63cc9f3d843d18e6fa58d5cef92f957d084d7a18 100644 --- a/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png +++ b/images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:efb27dc019415ba40e679b8327d03a71f5e4a8f7a1eb83657afb4d93cc14530e -size 1456910 +oid sha256:76ecde7111f31c26eae2d9e1b984ba7e452f5b1817bfd3845bb8c6936301cb2d +size 2297975 diff --git 
a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png index 577eea5affe820208618e2ba8c1a7e19f18a46ad..53a437f3adcd9ed9cd5083f7ac0ee16fc0755368 100644 --- a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png +++ b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6501a5168d1a645c4a99ec5ed0331cad2eb9decec784bde54ccd32a8aa11197b -size 1490825 +oid sha256:9fe51cbb997930c4c5cca8bae458c78c460736b3cd3cbe3117b8dda2745023ac +size 1747848 diff --git a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png index 8d7735c6fd05a7481f9e3167163e1f151e8b6362..231e7d03898ff35beec7c9dbd02e9d82f40ba57d 100644 --- a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png +++ b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9c6943542ddcf396aa7834a1c087b2a830a025bd781d50c27ddfb0d9e79c04e -size 1666495 +oid sha256:c3b8e6829718ee95ff955f85e7d7112c3a95a5ced8f85ff3a05f1763678fbce4 +size 1504565 diff --git a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png index 1c74dd7610f900d222a0d18d2a65396a64ac4ab9..7339b0d20ccbb7a4b93d13839e9ff6c6abdfcbfa 100644 --- a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png +++ b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fc8e0a62618db90b59739e1d041a9cedcef9aa143fe6e22aa052f2700c60048 -size 1094652 +oid sha256:fcfc5574ae18a35f96967d5051f6edaa55877aa3a620ed1d41d9ec5a58fea359 +size 1537833 diff --git a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png index b0e80734a55aa29a2b53eb0f418abd6f66203bf0..155e1faaa3eff1777d02f11a58c53ea1e5edcc6c 100644 --- a/images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png +++ b/images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b06f1fc03f389c6e1b4168a1087961983de42c7529d81ba1d8086be339d36dd -size 392701 +oid sha256:ec825bab3315c178b6765dae55f7b5e8c939237971aa7a7a064217586de0848f +size 571687 diff --git a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png index 9741527d4a3f67c9c7e0b370ded7aa1d6f8738a1..209695d644cf4f78391546286ee77b932c98d4ac 100644 --- a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png +++ b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21f7badfac5f2f1c9e9ec0006f301a592b08631c396ea8f1c5ca34e9116f84e5 -size 973306 +oid sha256:38af68ff09b65895a55219b501ff1f2298cf2c412ad9a27b138f0c298a1b8f3f +size 1017247 diff --git 
a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png index b9f00c2a01ce4a8255499e24f69690c39d2682a4..c629ac65fbe0273d37603dec5de12856c640183a 100644 --- a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png +++ b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddff1c332ed1ca63330334d48a7b09d98531666782b902faf5691db57e69169b -size 1080467 +oid sha256:a7fbe6ef0e74bab7938355c6c8132ebda9c810dd47f3f337c4faf64b93fe2132 +size 1812761 diff --git a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png index ced1749713e329e74e80001fcb7cde103ac21823..b166495404d09b0937c11490ed9d626db1f86b92 100644 --- a/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png +++ b/images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c710cfda46885aa8a14221dcbc8b02583a7935c3ddc6078267cb4263b10f02f -size 1142781 +oid sha256:efb5cde8e3c427099ef014ca0af830306bffb5bc4f8e6ed4141ab94822231d85 +size 1911399 diff --git a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png index 01e775d84d7c95a03cba9fe895c6b84615e93392..2279ae5265b856ed2b1002f83406ce9ae67cb468 100644 --- a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png +++ b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d392d6da0262ad46a26d9d79e5764d0745e5254e116fd8050c190dc7fe7bc0ab -size 845659 +oid sha256:96650c2112356775e13893ed2afd1072cb98031bfd448ab7854401c456c026a3 +size 1050457 diff --git a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png index 7631c743e7cf6bc8eb0802d673dacebaef2c2128..8e545b5e8eb917da90808f47207c0c20b8731914 100644 --- a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png +++ b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6224b30aeb6956871927496057cd76821b5fb45607a530b3205f7cad782f451c -size 900378 +oid sha256:2035658192846291ca1398d924dc46f2bc172ef9badc82cc12044aaf57d5b0a1 +size 1015832 diff --git a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png index b76a0af25f42d78f0700cc0ca2fee8b8fbd27695..01ef160f1b69f0f4bd1c1dddecc9f30e1d735e46 100644 --- a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png +++ b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b05512d597128dc5cf96f4ea8139b80b493f4496ea4f98b26e496cf2f29e6a6 -size 1162652 +oid sha256:a80495ad7a3159b9119ec316940a1cba707e01386d48c8ec509695c93e7fb8e2 +size 1082136 diff --git 
a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png index 281d8b33ce02895e01ecf639e5b5719fb550a69e..dd337a505e83e9994f246a2bce78226a5e9107d4 100644 --- a/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png +++ b/images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78e055a2826625bb06beeff38b5e878330bc0af02deea8b86e1c82e5c205d2a7 -size 930116 +oid sha256:54e34dcef7b8b267b270e75c91972e7e26da8a78fb732961cd9de1fff645038b +size 770300 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png index 5510f5b77063803d0c90ab200828d785615fb553..e7f84bec6d221eb03ddd8cbd10a83d5581f81523 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06ae97cbef9fb32478a4653be771d4ab9abc910c0f96cec127d8f202a999b814 -size 640042 +oid sha256:da49051314fa4998fc0f0dcefb64dfe41edd430ac51123a711c27cefb76d7c9d +size 517571 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png index ef4f4f5ecf8d1f19e602e1180f4b77210e8c59ec..ad5c7baae56bc932966b1f229f375faccb049ae9 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abf8b22e5208f0179c6bcc08dd18a2d1f246b6a1e85ccd6db9a6835a15e41f98 -size 733757 +oid sha256:3cfb5b2549ff9e4f68d8c9e19233494d6a1d16720edd24bb137eef224614b789 +size 1012625 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png index 4db1172d65ef231757236afbd096ad4577a8a156..9945b13be6e8603e64b66e8f81e599d4f05615c6 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:185efa173f51dccf9f6886931fd55c72c923b23c358b9d514fb6d61b51be1de3 -size 814934 +oid sha256:f099aa589589c28840b24fe16613824380eee365d0224ab3368e34f7cbe5e580 +size 890388 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png index 2a03b431efa539cd043fb30b87d4e5f2122e1ca4..bdbe7a358522d50e2b70352624dafc94b0ef1da9 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9d3d6ed468103e17979ac08be318f8ada96c702750e16438949d33f2fea4406 -size 607904 +oid sha256:79448bb28adca1d0a6587e18c61628b4dd80eab578a4ce24f9f1a29916b8a310 +size 486444 diff --git 
a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png index 2acdaf30bff8381eca99204f8dc059af1f0a9a28..2a7b9e9dc2ef87f3841069edc25226038e7b5ac6 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cf5b6194a0be541168ec6ba5a0ac939c4e61f3ee1e35f8502a7c0ffd7561afe -size 734858 +oid sha256:27bba9fec3917549dcd05e1c77f9d167cc5ba14d8bc8bce12f293f855ae7d468 +size 765695 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png index e2f99219fd5042b450a18e6c5a41a6bd8ee5b8c3..a96190f1a698583c5cfc3d732c3be75d7ec86a6d 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2255abe5c103097a1fcdbf0d76838caae35355d3bd111375e884965bc476b46 -size 878082 +oid sha256:ecf0bc42a6bb5ce03bd11c2c7b81e856ca2c66d03bde956869257eee5373883d +size 1077817 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png index dd221f0fafb31f4e9e468b3bad4c549580886e6a..f928cabfde544310c3003cc07bf2c0082bbcc6f4 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a102877b2e8a432eeff46e40df59151c7444a9a267f6db7b620a8121605e7867 -size 941714 +oid sha256:e0557a2d3739f766779ff9e5a71844eb5ce32b2430b4f13dd8c444eed600bb94 +size 771239 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png index 30b1a4928699c613a0e0996637274c0b17434d6a..8de4067a41f9337d0c26dc0cb8c4daba3391465d 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e76191e057f5e2e2e35ea833df66a35451d654fe19b01a6379d318a13fd3cdb -size 1339207 +oid sha256:66131fca10d8b0e3a61b55e24ae47b36fee75428971281bf587e0ddfe48b074b +size 752572 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png index efcd870391119064727cc2741ba50adce89b589d..01ff80e62225267d4a80c820a1e25929568d2915 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b996a00f1005f13737a80cb63e695f638738e87298594e8b3d4bd821903cfccc -size 615635 +oid sha256:7f52406c9bdc0c7b68c52fa23e05c6ba0ef1e9a54b78723e2f555254b717954b +size 779070 diff --git 
a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png index b1ecf51afd97ec02deb58eeef9d91fa35efd3c15..3f057a4f84d95f1d53cb61c51114a1dccfdb0143 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45280b609bf97d585fe137991bc54d38b54f98414372273048801a0a4631ab08 -size 1173772 +oid sha256:86dea9bfb953a1c5cedeb4b241b49fd1ca34de1ee4f0652eaf03c3202f6ed000 +size 865637 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png index b571d2eecbb67050f0bcaa5dba8750a4bc7a036e..c6a260ff9d732fe7a31915559ec67b1064b23782 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d24096a745f36ce5d56a610419ab56f52b8f87189156c37a0e89d9dddd787d3b -size 1144091 +oid sha256:e537317bf789ec9e6c758d046bbca822baffe70653739f285dd847a916fb9c11 +size 803954 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png index e40f0c3b0f99a7564a4207f19d5a49b084be6582..638c83f0e7327e81f7719256c3c0733b2484476e 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1156e24c9a69e157e8fc5994db65c2a630f9299b7cc7d9ddf606a4cfbdcd096d -size 1063911 +oid sha256:caa9e9f086c1c021430cf7e54a7f5702eac192f994fb9e3b83c509060990c5d8 +size 828672 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png index ea9a6f5e7edb4f0140c68a285363116215ac6b27..1cfeb9310c1ccd1e91701f9064a3cee6283e11a2 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e543fd6e87b61ac76a3c9e5c3a72717cf35a3923f7e8a081df2d9dba84b15981 -size 579241 +oid sha256:4b1adece21e4547b474f7da534c945edcaf9d418e63dd1f9940bd2986ed56934 +size 473848 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png index 71a68c231f527be290743f793dcb3cb7643b570f..c1b73fe85bbea81c5e75a46ba0d0429843678ba1 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28da1363fdc706c5c1bd29c0e152f566e7aabb8d7942b1012a44e8b28eae47f8 -size 734315 +oid sha256:652bd4a3257a060fc9f75fdc336af1eea7fafbaf8c5fc9edee9c7588f2fe931b +size 942625 diff --git 
a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png index 1f8a8ffd86a12599bd9c20f4ece771893a1f6bb7..86ef3fdb9591458e305a06feb9ccd596407752fb 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd06674711de9cd0f517851389cbea422eac7e70836118b521e9db143d5909f5 -size 930249 +oid sha256:93ec098701e1d962e1444426124f6e4d807866736a4e6ab879a2a37265a9ecd0 +size 1263550 diff --git a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png index c8231edcf2521bd5f0ed3bf275182dd5960c1d18..c34e10b1beb2052e9a9a02f9048591b9f33101cf 100644 --- a/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png +++ b/images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:278f2dc5b386d576fe32d5af883cd21acfc9e6226d1cb5905c355650713a391b -size 680115 +oid sha256:b21e2f9ac3b0df5e28babd0b7b9942a8e83a0bc041eaeec7d165cc4b424d341d +size 708115 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png index f69b460572bff962bc7fd6701025b3d50a4a7e02..175b5650c1bea9d4ecf8a7e9077098b3239eee4f 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05550ba9a45851fd206b7c55e305780a9edac1bb12a1fddfc08fe85ab526107b -size 1193061 +oid sha256:fa4838811c2ba5d6b56674d1f7f5ca6ed1cb6d04118fbc0fc6f940cb92d510b5 +size 1653636 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png index 38d21a080fa1db5e14f0c6afc2eeeb5829ed4274..291411952c7b6e68223d255b48d5ca4d84ce4856 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d49acbd8daf07459d373176f52d6be422aa8ca9b01c3dcc154c9e2c500135dfb -size 1252563 +oid sha256:19e786d7587f7d3cc1d95e0491c726a1d1d1ae04a4a4d35bbdaf42926463700f +size 1372603 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png index eda0fa233a2f018383065d7c9bae07abb5bbb20f..0bcea4035c931e597dcfc4dd2d6bfc504b3c754b 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c0151b3021cafd934e05d719fc6b4baebc6e8af0b563961926dac858c047073 -size 1119798 +oid sha256:a22aa614710bd8c3e12ce69f672d15c33e90715a5506e1b3d57493bd47c8b068 +size 1591674 diff --git 
a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png index c9799f0edd21fee71fba2929e2b76524d3f3b537..02378d9b7015c6e0501a9d623eb129affbdffef3 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13b7ffdfadaffaef5958fb5786765590fd0c3cf446ed52c5479d84339cb6370d -size 1183536 +oid sha256:6e99c383e286867301e236e97741f11781f8211989fca22498ba552fde4cfb86 +size 1123474 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png index 38a15fcd117830bc2d336befadf5727532ec9346..7d8286bf0396dfe8597504a03193ed26e67b918d 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c895ae414928544ff4b9af9d6d5be64265de41145732d5072239f46302fa1110 -size 1170128 +oid sha256:d3bc3bae79a1d85ecfa1875a17a0c00786eba56ff810f8b31faef4b9e218ccee +size 1217542 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png index fbd094de46c25a442fa92475f1cb687f5f77f6fa..fbcd537b6cd29adce2ccbc7ba872213585ef614b 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7908d044de8843dcb12c9eeb8baeff692960bb6bb8a3fdacd2c3526838a57a14 -size 1296157 +oid sha256:2479839b47109d006b96fc355664dae742a3c0dbe01842d8a1456b55db4da6ac +size 1469711 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png index 630bdd7b25661b1886e41f34e65d76536d8ef52b..c56fa94c9b58b9f4eebf505d118356fd33169375 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f4029d614c28843775d894e2a2e8dc0e7cb9f019e7c78170924b89f8a4482cd -size 1118421 +oid sha256:20dca477998dfd1c6281553c9b79cbff99999759d4db287cbd3aac0761b93e31 +size 1507677 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png index b63d88c226aeff1f15b5c695897270019dd7b973..b30ae6c6fa606db2c9bc17af6e3f93fbe4089703 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44a0c3344a6df60f194381402aff54934e29c5a01f1cab515f591c349ab6b2f2 -size 1276423 +oid sha256:5033b1ff5164479af69d4985187eadadfb9079660d4b8f39204ee4319e7979c3 +size 1381886 diff --git 
a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png index edd689d71f330397fd2d126786ab8528306f76a9..a443d4a842dfcb0bc2d10aa8e0ff1c6638fcd316 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:faa872eb9b19ba83dbe7bc0955b59ef3ba4a274ef3c232e33c2ad3462c30f82e -size 1243459 +oid sha256:d132a4ff0c612af6ea33c2075fe6f842222357e513791235e6d51ebc2003d723 +size 1279256 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png index a45d3753587548eb546b7f596041e828b6174208..fe39596f7bdffd6827587ff025979a19c9401ede 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1498f06eed64f07751945f8c3829cf68a15a50b76487e14e791b4ca80a598eda -size 1148418 +oid sha256:6a3251ee93580c15b48d201a44420fe4a1ee27423d30133bcc84640eadb2be3b +size 1137708 diff --git a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png index 4b254d1a8f5a424eef97fd4fa99487890abbd539..54241c624de5f1e9432683806dc9b45975283f78 100644 --- a/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png +++ b/images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b66b0bf9ff09580ad3c5bded26c1b6653e167ade2d1a578639de37a2d1c0b03a -size 1295158 +oid sha256:2940c29d68c8e8a902d470f47e64e416bb621173f59f2149d8636ed6295c9737 +size 1283954 diff --git a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png index 93bd75ae631282828aba89f717a39aa789f9387c..9cc271400c1ba6cf10598a09e8ae025cc7d32236 100644 --- a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png +++ b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7611ebc2004eebfec1ddbb440cdd004f76152cd8539cd051e44249872e86712a -size 1136124 +oid sha256:4407fe14fafd500a8547be2e09b1f26e841d74a250780281425ee5c4b2a284bb +size 1252404 diff --git a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png index 027129930bdd72e10eb66650656e5a3a71fe372c..3da4e3a60e3bae677bb3f0e984616715cd28a93c 100644 --- a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png +++ b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:918e3ad2ecf0c6ebbc05f58d4296e5eef9b137b2b82974349a53b95e8728099d -size 979067 +oid sha256:7f66d3a5d6312affcffa4895af0d51d67d83a3768cdc4b6bfac1fe49ded125dd +size 1202606 diff --git 
a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png index 2138e395607c32a7f096b0dd28a95941aa392121..5ba97d084c510a966fb0e7b326cf71992bfbff85 100644 --- a/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png +++ b/images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cf3cc5bec2a8ea2acb560b4bdef8e2e26d0ebda37ab77519aea8bb79dc3ffd1 -size 1592361 +oid sha256:778dddfbd7759897d216d46ea108eda7cb0e07d5ab446c46e12bb102a360fe2f +size 1811055 diff --git a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png index 596c3db53f4c3745cace86b5d45a6882cc1cc055..5de7d27866008ef3bb58f0f3a18ab36b25790b3c 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21f2bbe196523f0085216b2890a69694fb7282f688ba809594cc2116ca512b1a -size 820606 +oid sha256:bdb48017cb03f5fef854d25a08d2a65f208aaf6f20164ff481f7f8f13bc94d91 +size 500480 diff --git a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png index 76b558d7f7f862e4bc900868bd8be33e76ab20d6..1de55a0c54efdb472c68efd76d876908afa86276 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:024183606102929ab28b9885ba7412a5d8c4a34df36a1301ac055faa30d20be2 -size 1199499 +oid sha256:e6e9d427e234008d5803118de630a0ecade03dc50ef817353768ee1ec2637075 +size 1416104 diff --git a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png index 7e026c9df718cabb7d6001e86f93bf81782c8005..f050d2a1f58a6b2b7db621c73b4a084c3dbc2d31 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:623e6a220591fc666e649b6c19576152c0f6ee535f6320ceddb3ce373cc03b37 -size 1489948 +oid sha256:2598db3da2f82cdd1f65ecd86624f9d5414f6da7a226678c2f8b383277a4f2c3 +size 958525 diff --git a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png index c1d34e3e9fc76d8b7e3a8353e3991df231aff9f9..8cff19a2d38f982db0764d1d7eb81f8970d930c8 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c47fb3755bfddc40bb65e7a4cd3bbd774a7ab7078b518e82ba02b5bdbbf85cb2 -size 1214484 +oid sha256:93a399ba16f099315262b0b189a9fa415c03af51c963b9d8adc364c0341b3c02 +size 1509664 diff --git 
a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png index 13df86b33dc141cd3660617e182a4b58b0caef30..3e36ba496b55be16b92b1d5ce3354abe2257390d 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f37ef07f50cffd37df60808a12f1cede0be9348f6103b45cbc55c1c963c1618e -size 1255369 +oid sha256:fc789f048b66b3c27ef23938846862daafcaf09197e57bc10d102e4567b8b657 +size 1297657 diff --git a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png index caf41e63653e5ac877e2ca208e5233b6c7d791db..eba18034da151e56cf0c1652a0f6734afb0c9eb9 100644 --- a/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png +++ b/images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5480c919a06dd196680c7d46fd77aa9253a4204a39cd6ec5856bfa68bd6429f -size 1157904 +oid sha256:706cebc525d5462c790c1a0b312c02d68dfe530e635eb98b02851449a6fc2f16 +size 859186 diff --git a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png index 7fb02a2530e628c9f3c7149d6d921c79cd341356..8a4a117960fa966d2a6b4c879524fc05b5a4cca3 100644 --- a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png +++ b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfba4bc30c0ed3ef87e23105f5662fc342f5601facec774a89dd5e5e40334d58 -size 1797405 +oid sha256:5ba01a4f05bf93a14e59b92e10adf9729befbb3eb0ab527bafbbda105c462bc9 +size 2379547 diff --git a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png index 2ab4ad9ca537975aa4b43fcdf736bcc3137d79a4..0054026dff4b7d39ee96ccb6dd2f9d78286f95f6 100644 --- a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png +++ b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c50ed658304fa85a7c6e1f26797491c16225f1afdd4dd61fc09bd76b016c919 -size 1727967 +oid sha256:6f074dbedc8baa19639097554746c5420f22a78fd689a5c519330d86b7e442f0 +size 1478126 diff --git a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png index 4c08969ba650daaba78fcc528d71184ea4b27afa..f21f841c650d652f0927dd57e5214adde3d8a242 100644 --- a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png +++ b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4acdbbf862d7389a4b29f834d4cd1e133d9b1f2b4234acf74810e738e7edb23e -size 1267808 +oid sha256:9ee10d57719d0274d20d4df05ca05629ae4b005e498d349791ada0b8d683c065 +size 1518486 diff --git 
a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png index 76e28ddd46ec03d24fc61bde9c5541beb069a7ce..9de07fcbd6f4d611f36f26a4066ec7bf07b2e0d2 100644 --- a/images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png +++ b/images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22cf37f77bae750164caaf81a61b7801b8fef01d67394a02df74327948a7f8ef -size 2436658 +oid sha256:593b553c554335a92a3eddd4685603cf72491cbb577cd28bb96d2cd3f1b88705 +size 2234847 diff --git a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png index ba9123792f7a652747a6521e17af54bdb0811f10..ccde67163b7d1aa07b404f8778b83a5186daa54c 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98beea53e0dcb7ad57820a5dfacb9938efd6f67f3f8ea0125eb86eaf701ee168 -size 1259041 +oid sha256:4201a1eb424c9170eb4cf6786396ddb4b52d811c9ec2c20c7961e3e1c72bcc9e +size 1467526 diff --git a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png index 2732126ad21be7a2210ae506721d6008d077cbd9..4f99889878ce1de3efa90dd91ed1463d3afe288b 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8726b722dfd1b5e34bd1c0de6622b865c522c20211e6519d7313d7b90603c4ae -size 791754 +oid sha256:3642ac18f96359402c80be35882a0fdee2e7f1e63294cce09f9383ed880e027f +size 885608 diff --git a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png index c8b0f09a9661d1792379d16c3eae385704c174af..22f31d6b81878f29215f20f302de3ac2831dd344 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca5639db3fd05022f9d76b664716dad09c233ff7dbcd77ff09966f9553b675db -size 473638 +oid sha256:fe2c1849423075855dbb00b355380fb2bd37ff81e845f23218afe2df37e35d6f +size 764362 diff --git a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png index a16d04477194611de1fbb6c2dd54bfcf88586d92..d247ae70cf4412c2d9e77307fb0fa003f509fe76 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e586b92a34d3ee043b4a9d5559be2b6e3bea21be8a65bbeb9f5e638c0d6436ea -size 508615 +oid sha256:6d2234d4ad5a755a28dc13039e3b1b2dcb18638341d35ae561395a82c34bf415 +size 918716 diff --git 
a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png index 7c2548f43cc663ce5c1d90259f3292937a2ca909..ac6ccad9c0f5f78d5cc4a2f321f753c1ff03e993 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2531d4e1fea6de8644e53452650a75f6577fc82a6b3d00317cdc60858dd56219 -size 687507 +oid sha256:ac6746aa226174c92f4f8cb21914fe94f8a370f54d795901c0c2578d4abcaf9d +size 1005674 diff --git a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png index e130522605296ede1b3811f4dc4e852a3a8b1dc7..7588131fd28ed66042e10b6ccf686cf2137c8ec0 100644 --- a/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png +++ b/images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1fc3d603640b16b2d167560746777aeae610e52f2f1843d5401ab982aa6e727 -size 437177 +oid sha256:293a38fe6bfee72f0b5f058a3886a5f0cbcd69d25b2903a7208062bdb625a478 +size 608565 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png index 14064540eb41b7ac8963a56b53767112c570b3c0..58f5bb7a439342ed8797cd58b02c26afd6510c1b 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c442483f54aab086999257d0ce4f1b28bab8fb9dd3f7e76d6ecfa1a0d286839 -size 405768 +oid sha256:04a1658cd28b5efd489880daa1b1ca74b76199222cc22d19897b80f9b3ef11b8 +size 246793 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png index 3c1e8b2e540ed293fcad3a63e41ea1f00a0932b8..0c5d3e9cc5b1d836055aa019a0ce52404dda9848 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6629638f1d34a00184bb92a793c6fb7cd8135dd71fe16e2d8e8e9d4da00ed556 -size 475706 +oid sha256:ae4a9745fb43b8fa418e56492c51c1f3a307273668a1c519e6fe27f2c80345f9 +size 405740 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png index f08eefc2f05feaa58f80092d8dd713ed3774deeb..07742ddfeacfd710a3bd305f940bcfdf12187edd 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef04c7bf5120b15342679980d79f054300492af22320a8c1259e2fc111a266ec -size 1609536 +oid sha256:7cef0cd7db4e371a08b64c9a88a4f5d7c97f2fde29310fab1aab8f36a2811ac8 +size 1988943 diff --git 
a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png index 2777443972e7775b9968c2ec5ca5aeebd4ccd65f..7d6aaab3ecb59d0718264c44f29aa53885f5b583 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:815dfb908e5245cf8d1b6ad664063f517cafe77a8912ff83da020a3825bc1302 -size 419500 +oid sha256:fcb44a415c9095d6895919a77b6ba7fe693b866a17705603dc2715f600ea6821 +size 508909 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png index 01186230b1f8f6f83db22fb8c6d5a7056729af51..6d9e73c88f84704b25a1d2afafc5b7e9cdf5ffdf 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65a963d3f35565884e49c97ad79e706fc41b24994197a305568a93a04b5219bc -size 475177 +oid sha256:561b28f9ed119d74e72709c8202df8b9b47bbc64690ec54929f7d21801d1ac5b +size 516717 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png index 2777443972e7775b9968c2ec5ca5aeebd4ccd65f..7d6aaab3ecb59d0718264c44f29aa53885f5b583 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:815dfb908e5245cf8d1b6ad664063f517cafe77a8912ff83da020a3825bc1302 -size 419500 +oid sha256:fcb44a415c9095d6895919a77b6ba7fe693b866a17705603dc2715f600ea6821 +size 508909 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png index 0b8f4782b7f20f95087d5ecb64deb9d3a002d365..fd44e966df7b4416fce2350d2d66855f3a67161e 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db865cc84fc9fd5af80d3c895e189d50ce75a75c1ebcaeb47e488a81d13e7a9e -size 474203 +oid sha256:918bc4354bb9defbd7b95fe799da54ee0ae2f825c6f021885132e5f16895e238 +size 290018 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png index 223ccec34329b6399779abe09af79356e045e196..65e8e50e160b2a2d16543568e41e8ede4ae43f82 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86e3a472cb9c7e4282b99858e4c6302a8d59f7fd91d5c4aa51e807d58700557d -size 474918 +oid sha256:e86d0771dc16e6b2bf21a63ae80c891acbdbe3f4ebdfc74b88b7a878572d75ae +size 516459 diff --git 
a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png index dadeafedbffdac7c0df73cd29268efeec534682c..861d61889ef0c5f8280ecefbaa02cae0099546b8 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab86983bdbd714743dcbd20c5c0c6f1d4f5e8773c20e3f489a3fb5399bc44f86 -size 475061 +oid sha256:a1e42352d38d5df4f36a3e5618d1aef2e440061ab4f0d8f63358914367cd0ee8 +size 427719 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png index b85f181bbbf57e436377c9e92d824a2bf43a4fcf..703b100f02f1ee78630dad9e2367f8d291576f0e 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c66c59b4d3425f671eb7257af0cefbfd59ea3563917351acce33cee67ab5de07 -size 483784 +oid sha256:3d48990bdd71cfe1d95d4fdde9c2a3ddad42a120a64a99f4a564f0245900a136 +size 502324 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png index 065adce16f25db5364f3b21078997678d97c7470..ad3b6e2bc10f8bb263e807fc40302d1c1b7fbb7f 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f662fed397b20776156468a131f728c30c21fbc3a1c909cfaaf8f0b0cdf4b33e -size 475125 +oid sha256:dcc32da3f4e75228e816420b70ae3bc4016ef9ee03082339c6f68e4c76ba39e5 +size 516554 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png index 3c1e8b2e540ed293fcad3a63e41ea1f00a0932b8..c5e66431d30f821e831a081a2db5665d11527ac7 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6629638f1d34a00184bb92a793c6fb7cd8135dd71fe16e2d8e8e9d4da00ed556 -size 475706 +oid sha256:7ffa022034f069c92daa8d4628d94c9cf7b35fdc468574b0e1aaf02fa181c0f4 +size 517165 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png index b6c44fc12b0e6fa9548b1e47452e448d78681942..60a949fed0f669e9468f13f185f29fdb31ae8c3a 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f31cb5b6fa42006fcd23024383d5f58d55aba4c3473767d264ee0a09dc3ca517 -size 479754 +oid sha256:382bdc5b1d5d86c7881b20723d7d52828310ab97a5dab53b1d02a71b3141f0b2 +size 424174 diff --git 
a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png index 069d1fd9b8b625ff74e13cac8e5c62d39f809bb4..bd201f826057e0c83a6fe910a6e484541bb7dd5c 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fc4ea514bbd2696db58cf940c6610c6a79aced68968f25a26434a35ca968db8 -size 1429463 +oid sha256:a80ef8c6846ad7175d5c2526b5204617821d916d9f8e0a991498b9ed07415d13 +size 1952164 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png index ceafc85e61718dfb7e42a81f060e1bb6c90c1944..6e1c7419bae75ec30f0c65aa95e73166a5767eb8 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c16b8bb17ecb45ab1b4716c71090b81b87af376e9f83be1753ce4220d98e00ae -size 499685 +oid sha256:acf53e81646c37c54a4b49a801bab612acb2be9c6304c77a00a874308d02b7bf +size 561513 diff --git a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png index 065adce16f25db5364f3b21078997678d97c7470..ad3b6e2bc10f8bb263e807fc40302d1c1b7fbb7f 100644 --- a/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png +++ b/images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f662fed397b20776156468a131f728c30c21fbc3a1c909cfaaf8f0b0cdf4b33e -size 475125 +oid sha256:dcc32da3f4e75228e816420b70ae3bc4016ef9ee03082339c6f68e4c76ba39e5 +size 516554 diff --git a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png index a63fb19ac2d1d8d6f888b27c3f7b76befc2fde79..3510e0fd579cc3bbcb550d3f85dc6c7cd4e20070 100644 --- a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png +++ b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52749383515f1224bc11eb4f047d7a46af84d25ef0222b7619c9d037ebeaa424 -size 536763 +oid sha256:552503807b52028bdff4d125949c758b6edded38a23cb5730ac9439205232dec +size 614492 diff --git a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png index 2e69d8e96d67ee60d3f7772ed05defc889fcc85d..61d4a21a060252af11e9d0075c28f8ce9f92c8e1 100644 --- a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png +++ b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:336c0a17b5c9dcc86568123814e3acbe6e9318ad2b89006afbb863883e3b38d6 -size 2053228 +oid sha256:16675534470b10a97733f2140d51d809f23ac9cb1b1f2c1ea06c62f2eac7f995 +size 854155 diff --git 
a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png index e58141e98d0d56137dd94414c36a03c9f8735b46..885df7a5945183b8d58182230f2d0646a4220b57 100644 --- a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png +++ b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe552e873fafe3a8c121bce7025374478e5bb702ce56cb43d6d2bc104e8abbc0 -size 1355274 +oid sha256:1c1b0c19e8e182413d46dbf2880ba383b1b4eddc4872f9c3b8504ca85ff42ee1 +size 334341 diff --git a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png index b4d4fb617bb62610193c6b687e4b076119dcf43b..7aa9dbad0371b0b0777be90c8884c50384c9b039 100644 --- a/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png +++ b/images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d047f12d295d20e0b767712347eaa89eb52d414b1a254c3af08cdaf39babbed -size 508480 +oid sha256:33e0653646ed12dc1d7a227d3583e464254bf78d84ab19d0950386e4a6b5d79e +size 626432 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png index 8cd185499fd63a091de4a94be78c7c6560dde050..34bc93164ad6d51a0a181edb8d94d2fc129c9a34 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fd8356bf78343cf372e3d605a1fac1f30217d04dbf43e8122d22cec404e1edf -size 1905122 +oid sha256:3c153560a547362a58b420b49dcc9aeaa6fbd1662a67e0c8d76c698bb81b26a6 +size 1718130 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png index 02a42767ef7979bc6c84a21d8931f1be900864cb..1ccf2ede8bbe45ecfe4574db5bcb5beaa9d6fae2 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c45f7719fa43a197b6091f0c6a8432ba76710bd3d5e53247b61d1ac2ee4455d5 -size 512306 +oid sha256:bbdeeebf6e794d0916a908a463d6d20075735ad5faec1bc929dde9db0dd6bb21 +size 631295 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png index fe19670e93947b2ea6a7acdd82a4bb7f6dd632ca..dbbb9c8dfcf02a633d4c21dfaeb425c1bc76cbc3 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c5b7358aede9df975c3ca0d453bb126e3b524fb5ad682491fe51d8216d1ad7b -size 455185 +oid sha256:7c84287ea2cf9d9007e6f74fc171feb4d69c5d1c5e20fd3bf5c0660d3388b2b8 +size 295639 diff --git 
a/images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png index 009ed2afa76852d470b683b5f9ce898948b77ef7..9794a6b9cf703e1922a664199c045ba1f01cdc08 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:773ec0c1cc6542ed2509276829caf4866a25fb3b774d42ba7aa6b6f2689fc747 -size 882971 +oid sha256:62292ae4020d690a4cf53055bde55735cfde3ccde44fb9eaecd564f70cd605e1 +size 540540 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png index 648e44eb6d953a5ba5ddd72dc68b730a0b834c46..634eb91c662e574452ca4b0652e3508cc2a298a9 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44d748e71564c03a216ede332749ad8f50778480047719ef318753d15e4e6eb4 -size 461467 +oid sha256:a9e29391fc01823ae9fe63b2e52546c338695265de9bbcf83581638c645f8110 +size 604592 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png index 34d4f41ef3b5cccf55255135ad8b53200bcb6c5f..9eb1f7caebecbfcafcde02327a599929dcb4caf9 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6b314e415e8c754e019387b5a118f56074b7ebbd54dc483e889de71b0c4c61d -size 894690 +oid sha256:1136c595e7481289ee3869a69add62dceb53f0e625c50268ef1717915cf0259f +size 772256 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png index dfa0985355c68dabb408818a38d13550064f2959..ab63768b9c786d88b3123af1fa08eb678e925307 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72482ce3d58f928bfc224b5d22101dcab1073b2af437ddc302a4802bb0d622cd -size 1007158 +oid sha256:89c33f8408f921d5d9c31df3ad576786a203ccaec9f8d513a96c39353b745c79 +size 721185 diff --git a/images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png b/images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png index 8481e9a09d46a9075b117b4f45b00ef904646dc3..5fd0fb8e8da406001143fb556a79dd836a42117c 100644 --- a/images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png +++ b/images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:529c2acf09741902f446d1b4f853289a5629bc23db1164a2933ca409cc1cca43 -size 500083 +oid sha256:fd671173e96ed7be194f2cf44455b29d8ed67ac449a4c1633d791cd4fd25b1a0 +size 621806 diff --git 
a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png index 5f5da9907f04e7309ec1319044051cc4599bddcf..b5bdc213affa8170f8e239f2aa576906f6834557 100644 --- a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png +++ b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ddb11c5e702ff2c68145f80a4c8d6083e96e4370027c96cb92530be991de32a -size 696124 +oid sha256:7e7355435c0d22e1922ab09063bb7add02c7b05f5e31431a15a366ee87c16773 +size 713103 diff --git a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png index 16db2cbce7d30e6a9937c31c658a6e896fae2441..181f047f19f033714ce0ca9970c115ddb1ba225f 100644 --- a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png +++ b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:daf79337c6a5cd3fed86c1694dda33ebda70b249947579e1aa5be2a301a7b497 -size 523200 +oid sha256:bd261753a2f7228f458014f4ee3fa042f9215def37b5c0f30f09062835eefae2 +size 940045 diff --git a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png index 82617022298c26e32e7273022c0a736c7af92931..1536b673c17018f5d1800f4684185e96827c51d9 100644 --- a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png +++ b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49ee9d17e1f47b1314fb54a109f1b820e7901787004140c44a0645bd06708ec6 -size 699979 +oid sha256:ecc577c942ea93430dfd71ad6355a3c4ee545acec56a082d0216691282281505 +size 903707 diff --git a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png index 5c2f71722fdce72d9fa8c2488d0c947cd752e18e..c796e96e65c621b3589201fad34d4a14f7087c23 100644 --- a/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png +++ b/images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27d1380497d860a3583b46069ff6aaa9f377d5aa1c0a231460a11719c3115353 -size 756297 +oid sha256:2bd070f1eacca18c5ae9f6189e45ce61dcbadb2bd08e8eae9174bb4e38f0c6a3 +size 472734 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png index 3b86a33d9b9b2bac96bbb99b2dc48c39c1bf7896..be7d316c6cbf8c98bc3bb444987d0758314a4e72 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f750b51ab2de4044c5fd2b2459bbc89d151377f8a2ad7c5dfe6b412319edfc5 -size 847147 +oid sha256:14b0af4eec569526b7eb0559546c8e17276dd43c4f904bfb64ea52ba64149ddf +size 1163277 diff --git 
a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png index ef5b46fa2d3afc6284fd9551a558d7697fd80a67..622e6fd3ec13148c85522a6fc0bff37431149bbc 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6374ad6f170c96a6c1d89bf5544e82302ae53f7c92cf8469d79fff40179395cd -size 1234223 +oid sha256:14ae87f1fba87d87b7b6fc010c0c03dedca10a9adcbbd9a47fec504baa9e469e +size 897781 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png index 4ec394161ce94ebf2a126c5a494bc5da2c155133..8fe31448cc52f9224f92ef469d6dfe123244be7c 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:284fa7fb505ecb7ec151843fac6dacd3c09ba40204c4cb67c49fd41edde48392 -size 796774 +oid sha256:b634c60458969ac798fbb4792bc211c4b694bade08d60043e71c88c5d70e7a29 +size 1106010 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png index b8f5f99d6c63911da7be77d097d5aae811ba9f19..d9ed8a2b518098318d9486cbb6330b39099eea22 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:777a99d34a1ec1154082470f78e6bd8af59da6f131107c47a745f46b9a92cc87 -size 1071612 +oid sha256:d623eb34d1af8ae0cb27148a58bc59cb2360530e35bb7ea8e7097dbc5aff8e18 +size 1259170 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png index 621d02c03c8f162bede45381dd8a3e5a4441ae67..913498c56ca7144a10818f2013bb661260d850ca 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87cdd8cea440209c22102b89f1af401771da6f9412a8cabe794f4309ac014c92 -size 1268120 +oid sha256:dfcaffe7784ee3db6c655232d301b9e9cb52c775ba3b41a9b30c477b643bad01 +size 1506209 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png index 0fe4a670a583db321df2dd76258b9cd7a8971773..4d8d7727b0cec9e4110256925674ce032faf6534 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a649c349807a47d19b1a0914f03612eb09fb7b2f12bf1a1ab05f87066f9944b -size 850816 +oid sha256:3589a7eba9909cc5f4c934b17bb16d659d7cc1a722e723653677ce03e6cd44d5 +size 967762 diff --git 
a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png index e4ff57552b57095dd51fe5e820be12143e0c7671..d8d7bef199b9af5b94a518a29b2fac05d94daa0b 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab3c143eb1ec8ae3ae0bc3941b5526ffd8dfcb0a2845f246ac90a32a98c66ec3 -size 1166444 +oid sha256:4ae9ca8d840602f4d801b29e9a9b5bc44685920368764c2209844b96b3f592e6 +size 1585777 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png index 17961ece7c1a33bc35b3705bc33da1f5622b62b5..2d4646ebcb16843fb5b3397bf97873786c5b4af1 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:844113e9192c1292d3203805728df3b6a87244d2afc0a38fbcea20dfeaa65891 -size 1046192 +oid sha256:06e2f5ebacdf752f18aa1991857471023bd48ec2ddefe7c10cf9a94cfe9613bb +size 1247699 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png index 9cc3d1a28ba9fe787d1200c573603ea44ad0c8dd..f818082589d22a52ddac86a66b0377337aaf1a87 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fdd4309a5ad09e81f06f1dff01a7590a6d1b8b1df4c2c03fac152994cf55277 -size 1070630 +oid sha256:0d2204b94ea0d7da167e002bf6ebb232156c85746398899946b91e88253eddfe +size 1252172 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png index 54cc4840a4c7a8f344dd9b6c27131ecfa7ad6b3f..fada1bd50658fbbf4e652846b613fbd5e3e2d22f 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ea5b9265b07a0547685df3019f9c15df5e5b4228a2401d05f250b71e0feed77 -size 1192280 +oid sha256:599ea5abf37458528f01371a4dcac35da3e62f48f774f96c1ffc69273501d1bf +size 2000940 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png index 6b53bfa457ee5f84d0f2cea7d9ae3f75e8c8f588..5a773644ca960537fbba97dd74e5a9d5390baeae 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24813ebf6692f72cf5aee6ee36622475cd1d7bcc244af176242204cb987c26e2 -size 1155010 +oid sha256:c77368e1a72a930497f825ebc300024947877366f317af0c3e3c49b8ea5dceea +size 945172 diff --git 
a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png index 59d6c45bb77a18943b03e7d6b0e3d0c801d956ec..af96e62e1e42964e9391bb958c7789d07ad77886 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:803b1c3012fb0480a7673810fdd964be4e4df6baefdf45c5656ecd839b33d5bf -size 1000703 +oid sha256:c7f35233de47d3df48709e7463ad845d921c7f96fcd98c237ca632ea62c6b66a +size 845881 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png index 8c7aceb86e74401edccf5413662429e083189fe0..8fb37281937b2ef65bd08e606bf9986da9db3714 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:565ad6375b0fa0c04c23729596e0cfb61cbc55e0eb9518961b57eaf07604836d -size 940780 +oid sha256:dc7cae2e385c7a2e980c50aa6809fd77416b34c6ed99b910ddb3f9d829c37d0f +size 1339312 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png index 424bcbd2420a3fc83328ba2f694f33c48c87352e..98c8c193ff80aadac80f780fbbd81d2881cea085 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa00b18ef31f727871acf7ea00418513a8396d8f30cbeac7b75df26d0f9eca0e -size 888243 +oid sha256:0755a358ecdf7eb40357100242594016e335331a2b28ce99ccfbc1ef8b357bbc +size 915576 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png index 45ababa1bed8ad5e8af2d72a2f53387cc21b03cc..7bdcfe7748bf44166eff218ec94c1862d9557319 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24b30b820a62a6c144f29c2de559420b8b0bffa6ffa75d7358fa9062c1b23556 -size 1000730 +oid sha256:17ac1f2f4455574b2cd06711138f738c9d7d8c2509b60a145873205e8e1feaa7 +size 1175564 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png index 6e43c331b095264a4589f978d23021c5adb8403f..265337a1c589972247f1cdcb3e78704ee2357aef 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a151555a05ea350e0a00b8022a51ae405bd4059f07a7383aae390c41ab397959 -size 865642 +oid sha256:5b3e431de5234d907c9595a06a3e9b8996d7329481fad8e41c8ed82c2155241b +size 849033 diff --git 
a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png index 80fb7dd17f4e26b2c9df4b424a8658129b0e3af8..3da7cdb86943a85fbd0886fedf7fe1d2c6c53f03 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c0734517b1c365f2f604b617529f3e492b55a923ab79efc278979834c928baa -size 1256686 +oid sha256:3cc7e2037b5ad4033f2964d82f6a599978dfcc12fcdae278b6c51ba272f8658c +size 1440983 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png index c6692a7ac78dbf5de32696d2dda081bf3096611a..21d28ab666653ec4eec47bea8d554279e5db90b5 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77248b8bad7f23d3b8ddfac3df59fe1c639b9ef881ae634814129352f8d7c896 -size 1035062 +oid sha256:9ef3938a68ce45c6912bf4d5ed748d1bede3d98ca3f4feb6c8a6f116b7ca3f7c +size 1191417 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png index ece9217ada06af6a702d7670bc60fc072ee79972..e522ccb574295652f9fec37ad5cccd698c22c1d9 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b375f89646968f7685e0d3ffc9a9cae1e376f2182a6071a2fdd39645602c773 -size 1188724 +oid sha256:273efdce6eca0a3c7bc4dc7175ea26afdbabc805ae1290fd32818ee027225e3e +size 1198039 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png index cd11fc63a2440e4bcefa26f917c5bd8b60700e89..c38a43997bd7aa1c681907fa9f89b9d3a7166781 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ae47183d703ef48a4b10de8723655b7e56aeb2b63595f2b8b200965f150fd80 -size 1600157 +oid sha256:a57b99be0c303a88f1aed74a98e4afad8c09249895d703fca3e40d5a94ad2d0b +size 1200169 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png index 5f1120c858991783224c0f0abe8515c43775e963..1aec4b64fa801be9aeec8891f4917bac1d0de916 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbb06086352a0272af68452713d30e3f4a177d1e9c0f63e11cc7178c1c09b36e -size 941574 +oid sha256:fc854a75e98feb6c082eeb227186aecba165ad7017b7d19ffb70c0a6e8bc6ef1 +size 1178321 diff --git 
a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png index 7a39473c2be2131fac065d78e21b41f203acf017..23e4d433a5fce0dc09e254df6cd69ba267d91029 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:783906785c2df5c4481a96957a752ca5d7851ba9730106b2d990835dd0b3a0ff -size 950423 +oid sha256:38ea96254c70e93e2e7729da1f17dc5a4534b397986e43313727e3fe789548c5 +size 1058136 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png index 61b5613eb0422ded0781e21d13b8d3e3c7b78073..89cd4505e44d8058d4e2bbd512056f24256d098c 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e418be8927e212d80096f62246647a95d5f4301143108be4b05fa8304d28d2b -size 1213621 +oid sha256:99362a595cc6c2074984127f58727d0a45bc39d21d3b7ea0a8f5415c6be14ee2 +size 1417167 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png index 97f673255385b01d0ee8ee71a0e09396e7bdccf4..1a82b6db02ebfa7dae5b79dcbfff9ee294908141 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:740440cb770dd0b3eea155299d826396f27feddd86821c27ce115315f4678151 -size 1177033 +oid sha256:93a39e7c24db8dd5e760c7db84b7fde6d7bdd99989232649253dcebc133296ab +size 1774496 diff --git a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png index 327196721ebc1f5dd7c53056b26dbd732c5c4eac..d16fdabbc8dcea286ad60488713853db51c7dc7f 100644 --- a/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png +++ b/images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba87d647f67c77b67b4b01ec823808b0970c97bf5b3c221b93c37e9320a8d04d -size 1028784 +oid sha256:48eb4a751b94cb2dabbf7b8373bd7983f629a16271efd48dab250d00c5bcc4cf +size 1907873 diff --git a/images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png b/images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png index e128d424f81ff7540397a80809a814d5fae2d656..92caf12ea0bfe5080be34d8b8c8a5a63d1bcaa19 100644 --- a/images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png +++ b/images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee4a90889b8296453ecc98624b2dd603812fb9442bb856de857f056f8aeb58b1 -size 269960 +oid sha256:49f1e54a03ef21e3a20e9b3c80fa88e0384a4efe4f3f3342f434bc4cab57e142 +size 587314 diff --git 
a/images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png b/images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png index 1e833f8dfc6fb27041660af2d5e75665be1fec3d..6590b9e0362b9b7e4f3658ecc8c56c3596b58399 100644 --- a/images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png +++ b/images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b662de255cfffc403e57f9214070832e7e637a3f87cc1639889ad23d32416ed -size 797282 +oid sha256:b599e86458a52dc761eafb79fc1638dd4d1261c97cdda1fcff51f1adc9238f6a +size 688175 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png index 73db3dddf4f703649c0a254b41b3a16f3de15551..5dd8549af5772ede86e94905dd60e2afa018bc22 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afc808e680e4976cf7fcd28a24dddf7f513ca6b7bacc3bad51ddfcc6f5d24d47 -size 2871654 +oid sha256:74c7e32ac5dcf3824086a9bf8c901071ff69fbaa068d8e51bf360e3ff56f93d4 +size 2562741 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png index 260dadfc3e850a5658467d6dd26d90eb770549ff..e8f1861b64f5fbada72c70910b655f318cdc3731 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91f7fd58de88a8e1b2ecebfda20c28635aee12f90d0db405f49eee956be4d6b1 -size 1273122 +oid sha256:21a279b06a32cdf0d4f80db20640128e98465798dce1ffb8e6edca360c65945c +size 546109 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png index ca21462bad33d66f70cad34ff1108092c04a3f81..dce081b402e0f3c8e6b99909a5176904c681a600 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f6c20c84c16275c28a999c171fd5428e085c7339cb3ebdd6248cfaa8f90b7d0 -size 1126141 +oid sha256:21cf7c9151cd70a6147a77c63ed2f3200a687e2ec2263e91400be11e12139ebb +size 886295 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png index 666644aa6ab6eca375a5c2c58d5a2c41a1869326..4c177d4c06f0f2c27be2f42f250820d11f9a1c0a 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e343be62e4babea0151b894aedaee8f447727e62e967c97f335d081a70adad6c -size 1122519 +oid sha256:b813d043a528e3f01ecf93fa4ec6b8862b83ee09c33d2f5b9ae1e801eb5a7a0b +size 614270 diff --git 
a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png index def0b13a029c1e3cb090bd9e2e2df7bfd479661d..264859d82b5035bdd435c9d922fefb54d3b4798b 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a249000cd9d956d146cb3282dc661e706154af60c6db8ed9aec2b63b0769c2e4 -size 876328 +oid sha256:d7d9ca49fa42de47e942b84f41b097e494437868098961deb4c262ebf3bccf2e +size 871282 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png index 52907764650d1fbcd6ab7c5f756eab7556ba9f87..355b67acb2743acc3aa4adfa9f884f5b207cb2b6 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f238981188aa429fae0a82d396725de487c55e1a3442e661e2ee5ce7c221c49 -size 1306362 +oid sha256:7012fdee5728b5cb735274645f4508d4bb78de58acfb1167eb7325473b132b6a +size 1285814 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png index 41770c6ceda15aff18f2472d25aaa388623f603e..8f63fc4ebb60f35cdc4d5ea259a7385a72905c0c 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c34f2f6223ba1c489ba914ec5ba451a1a24736d2adfa5852979ccf7febfdbd5e -size 2835365 +oid sha256:f200dfd60d5e8de96e345a351e283106c6b44547d709fca807282716cbd7e5cf +size 2556205 diff --git a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png index 5d7a6d9921615f722accb06be135e11d472937b4..c23dd49e9d5e90de6e6403ef03198c310b5bb67f 100644 --- a/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png +++ b/images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ecd76b8b798931c1918c2ada18c806a40c13f782ef71a3ae35c2b935575e822a -size 2286721 +oid sha256:199ef498a7ea822fad99ccd913f24b4c261c135a62d98ef576f4ad2f7578f06c +size 2228270 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png index fe32cd51b6e32efeaaea94822bebd3480f27e079..0326eaba82730d32d8837db3609c00d95d6fcbaf 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f2dad1f203c7a5c62d0121ff98d478ddcac4f693e3d4ade15fce342acaa4781 -size 1041861 +oid sha256:501d92802b3a1f9b0e9ae3154ab8ff71501124265b2f289be53afd02c1da2b6a +size 1408712 diff --git 
a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png index 57b3765688f0bad39147882c8aea1fd4f4f53475..ca030b0b918fa2ff7471ca411c2c8cc1883219a2 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74de839dfd1a76cc6d0a327db5b60707fa6b872636b069903155af7d782608dc -size 939572 +oid sha256:c5b8d9e157506596733757aca9fa7de5566af492479b1df0cf89230402872f2c +size 1171145 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png index ad0a77f390963d2958906d69c0727f5eea91a7b6..0cfa06eed8595d43e355aae933607c345fcb7165 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56ad850b05c657c4209b7a986e96d546d962987bc31d134b7d6f71f85541224b -size 937827 +oid sha256:febbd415055775b071ef08e68cc044539c7d6d6ed796036cc10e66904b19df1f +size 1327641 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png index 8cfb187ac67583ae4e4c994607f3133df0b7b1d2..9bd103bee25fa5f3badd5c8d8698822b9e7b73bc 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b706972c9599944638857f1a026ffdef446b75e7b21b2b6299942ebc23e3271 -size 947263 +oid sha256:9cbda2b9ee3f6112bc7a68e16ab619145c6678950126e00e8aa3a02d6d658ba6 +size 1383467 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png index 374526f347df0c9d5b643ecd7448cc15898d2d21..499c2af91a5fb630f376b27ddd8714be447bb145 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c1805d1aa08e37167872e7b3b2a7b13c0484849e4eedc6315d3ce0b22348806 -size 935950 +oid sha256:37e394042b679a6b15633c14da5c352bd1697b1613b58442a9d4e7128ef89c9f +size 1414446 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png index 2de602a1162c4b0ca871e6a695f1695c47e010db..8fc65dcb859328611d014d85dab1986b58d4ff25 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea3b3be92695010424d36f8e2fc77c76a7f5fc29c0dcb69177702b24258c9cc1 -size 946095 +oid sha256:2818c9607aedfa9875dfcfdfce10647799f2356299a8e97d5f9e868afed986f6 +size 1444763 diff --git 
a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png index 6186d391d67e1ebb60e91389d86c8094d40de3eb..ec90cc9c82fc04b8359b8533e7fbfcb2a1dc054c 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aaa9a5eccc26c4e11222a119361cdc9bb90ba58c0d8a5daa62adbe67b672ad86 -size 913971 +oid sha256:919ea5e6907eb9b25fee5da06a067b903ee035f8ab7fa9940f187d7ed84eb519 +size 898500 diff --git a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png index d65b3b19213b00c99ecd57f3e2bb574e20c3d2b3..71d80f5e4509af76b0b6c74be806b0e713ee73ae 100644 --- a/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png +++ b/images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a851ffafba461adfa2001d75dfa7233e018300d3518b94976f880b230ce14a1 -size 947749 +oid sha256:399861407dd91a2f693183b25ef4312a2ba6c458aec9ba170afeb9b0bb0400fc +size 681017 diff --git a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png index 58ca866f1a3b14c36ecd500b406ece55bdeb8efb..a5eff71544a53df0be53f2dbc50dafd2d5d93d0e 100644 --- a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png +++ b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2723188c1b0995aa2c165d26da98fb3a9aff4afbb98cb27737657e860c4b627d -size 1253523 +oid sha256:35c889c9ae1b8d66a16df721c6ecdbd8dca96ec7e9b22f18b199e93bababd1e9 +size 928905 diff --git a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png index e801eb03591d1d8183f41d2205a75a672e02b8c5..18a05c298833fb02f8154223f7c1dc19fa11b67e 100644 --- a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png +++ b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2177704331ddc8d67ebdff525c711cad5f4bc3cc890a789837b43b8d04beffc1 -size 1454716 +oid sha256:d95dc66ec90d6d858126d74f1aaf78518afb26050773ce6ca6cf968761409d45 +size 1197472 diff --git a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png index 0577e1a5f6c3226462a993ed75d57d8898d8f963..0d762129867a1cc202532caf506268700f1fac3b 100644 --- a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png +++ b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5476a9dcf955841dc8f4b03f805126f1bd9ba5fec66f4d2e773339e399c3f15c -size 1364125 +oid sha256:9508e7ce832dc0df381cceeba588ec822a2a2e587bf7b4302b13d39d27b56f47 +size 1685145 diff --git 
a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png index 2e2ad6db18c36cb3d3c8bac433e0c329886bfb57..fa608f76345c3a9182d1a50ae9f8abd5cf3f9a83 100644 --- a/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png +++ b/images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f57d713268391e316780f56d65ef9f836357634f4c9e0bb2760c1993f89c8583 -size 1304151 +oid sha256:ecd2d7b7a5ebd87c94f8737ad8c4ab73db1a608b4678062f5d528f61e9efa553 +size 1109951 diff --git a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png index 3952d57fa27a8e9a2ddb6dd8cf283ab230e85f88..e751d547cab7ab71fca8320033efcc5e70f0c0d8 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3163fd7c6caa9982f5125b4718fe08c5d939360e566e75d4f0e305b55d8aa79 -size 2111666 +oid sha256:4c1fe4a5c1e68a115c70fecda775f6537cd6e22b3b101bf357a7df8e6ffdebb1 +size 1877328 diff --git a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png index 6612907aeee490c59c37515f4144484df42674bf..d0fcf4e9b1543b05d55348d90d75b17dc7fa2dc9 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5b9921041056667e3f1846c8dd0de89b1f5ea04c0df44b806db07d84b83d52b -size 1554110 +oid sha256:e49cee404625ef2b0cde59e385de0dc8f0d7bac5f7a89f48315139cf7c0ca962 +size 1760589 diff --git a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png index c60be1ccad8c9e2c5a5aedbdd526753a632d02d2..b55ad9aa0295950ea5f219ab2e5a6090bb94ec32 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee7c516d61bfcf901aeb83c69bf6277a1b44f98d5e09268d018793312cf65297 -size 1553201 +oid sha256:6f6dd1fbc11e5e14ce88239259322d53d1037b98a9b78aa75f1f75e662a53438 +size 1914972 diff --git a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png index df433fd9304472fb466d177a378efe4346cb039a..6f26899040527f5fa78f97a8ee234d127e856fd1 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75d181fa4aa4a48ad1c4adc064afde37b5a39a3e3110ccfee92115a844a902b5 -size 1557861 +oid sha256:2eaf269717dd7853715e1ec146da9876eab0688f5e400b796dc42e8cd69b43ae +size 1696871 diff --git 
a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png index 338bddca4ac9bf66e03e8a9f406ad5c48447c3ad..df8c25d8b3ac665b7f71ac7e61f73085bb54a281 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcffa077e90611b4f0685d8ab8cecbc35167acc9fb90bb9264635c0b1f500ef5 -size 1818310 +oid sha256:3f83b58f7681d3d2ed7ffaae18b8ccb107765461daccac594610256f16d4d2e7 +size 1545361 diff --git a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png index ee7f0a94b796677579276551ac5a066dc742b1ea..7fd0988a06d0b47297131b2594c0e8ac1a27d065 100644 --- a/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png +++ b/images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a718b2a1d293a050ce4329d8817487af3ba61fce5b90c0e3c248845f0c4ee23 -size 1153672 +oid sha256:885051f75391c45b472e22294273b55c5b94ff125e3a7ee597f58bb45178049b +size 1129012 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png index 51e9a5adae0e1b640cad560346c089b05509c580..8ef99f643404845abd895a79f1974b07135656fd 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e782e0bada18378dc5c4f9f38cb14b54729337f66670f5efe1b1d2025ed9ffce -size 765118 +oid sha256:65df815ecc3d4871bbfa28f0a3dc3cce7267daf67f9ec8a6232658cfbcc6bb98 +size 954422 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png index 78c0083a6b553a3c81e0bd30d090e713b83d9c02..3714e8a5a734d25d572c8ba77d97675630bbae33 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b4b4a41b428cb80b184a0c7cf53bcb9f0cad113421236d94038ad58335d0daa -size 1579248 +oid sha256:d840698fe7f2ca972e1db816e4322ea8173ec381559ee85f43e9a3b5ba9fd746 +size 1222614 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png index 6e595ba2d8a235b53ae185e826517265c8e0185e..a89df366ed50bb9c51c3c93d146fef5261089b18 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd65fcb80c28293b378214655059a6ac9249825dc2d25f9958b4cf3362b07b51 -size 200244 +oid sha256:1189e59b4583a49c83fcae37c15de86f8b803e1bac9178ea65e0324af51c23bb +size 200342 diff --git 
a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png index 166e95262d45682df81cad3a6e210d54ff471445..962a798de9463132db1f7b4c4d50b3e896bf7506 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cedf53a7ed1ba6f060303d023d53258e9ba4945da1f32fa303ba40b1be42e19c -size 977611 +oid sha256:e757162145b5b97c352bbe03880a53ae7a237efaebbbbd36787d594ba2bdbeba +size 1003042 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png index c6712100340ca594ae8dcfe126281488aabdff8f..cc0e55062110da32bca84f6e0425d53595a6c7dc 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f7a8d64f285eff661af1087a7d365d2b03df02e239860b8fecde4ca02098ada -size 1406845 +oid sha256:813e9f22961383ef4902ae3325b42c0c36e10dc0459c7b07d1f360e4c7d96d0e +size 1410884 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png index 89a8c05864c25a7671da86331beff83f80dbce0e..b7525bd6be3f055d9db821d53cd321084e089add 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:892a9a7e047503ab7704d727143b0c2d7a6feedd1aeb02e8030c01e41f3455e4 -size 1081267 +oid sha256:0e873ce0e61dc645394d8f469c33387c20a404ea1b3a7a0aab7aba972bd0b929 +size 1313737 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png index 669430449d28033be74d51d2388364ca7ae45d7f..10528091df23883849461e28ce0f14e61a8dde8b 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc4e8619494a501317ea5850f9513d6dd89887b0477e9ebc7ada377bf546b983 -size 764856 +oid sha256:a9313e9165ad173681d909e75efb3517892ea1ad0c48d93cf942ef38c1e56194 +size 1025410 diff --git a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png index 5c74c7b4bb35920e0bfb8cf1d856450ea33cd643..f8728dcc5d279bc4c65eab50c1bfebf236e784a2 100644 --- a/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png +++ b/images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebf2797f9893840157d5b2a7a666e8a8e1664dc23fc0aeaf099996563bc96de2 -size 727482 +oid sha256:870950f9e3ed727417233ab8902fadc5a633be9f2b7ded5e7b0b805d6f444a40 +size 968190 diff --git 
a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png index 70a0eb228ea54339e43ebe9533424f23bcc8a017..dc27bb54c5585ffd95107fbc9f665a3d0c51d865 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b66dbb89105fecb6b8cdb2bd6809808787fe38bb531a98c6dc926a0e242a7b35 -size 920123 +oid sha256:a10b979d1b975eaa8d42b205ef13bfec9f6fd443f5dcd239d95230ac7ef49400 +size 1009750 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png index ab1b7f4e895cb4790b7efd2d74fdff9983b16c79..040ec8254c63484ecb930a753460ce848d6a74fd 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0505cc48b7278ae20ef93a299a6b6d95f0ed0345ce12561e19b758cfcef5d12 -size 853150 +oid sha256:36792330e7a657061ac64eb57670610fc145f184089e0911a481177a37a5464d +size 796269 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png index ffbbe162b53f479866073cfd9e81623adb9d3419..19f263e1706cab1186f243ab7aa3c785d6de9877 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e25d278e77a7ed7001773c4e4d23248f797f847c9c64b51d0680dc87e2f2014 -size 1017459 +oid sha256:aa1163b3f65d3e454604dd1a6d23ff78268db29cab9c732e8705412a66b4f378 +size 950303 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png index 8d1d1a617ead20adfa8a5ed74ef31625fbfbbca8..18fdf4076a759e4f7dfdbe55e1e385b195c4cc7c 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15897daa542aa5439c06104545e287561a5d0360ef33aef77e6d602c777796a3 -size 1592738 +oid sha256:85b6b3bdbe18ae458931f796a6165b0f9153e07ae2981b1328002fe47e0fb38f +size 1580329 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png index 2d036fb07e919770c906505209300cf626ef45c4..ac2e8b7d97d23b17096788c2f27e8269a9b451fb 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6efbc47933a06d8783a20773133e39c1b17a02123ecffd55b22cc3bc4086f26b -size 1089631 +oid sha256:e61ca593c97dc62df970c5e51ba4b65baf9446fd16f7a1da8ee7bfad292b0631 +size 1040664 diff --git 
a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png index 41b5175f5850a6b7452dce6eec5f88ad10a1e44e..7529b4cc1be1c65fdc4383359034d3c9b190f499 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:814fe2ecd59dde3765106bbe251814cec2ed4b28ba8fa74c0040fefd72f94ab7 -size 1165479 +oid sha256:7251cbad26146bf04bbd265c16405259bea5142416c78d2d80e19faafd014a39 +size 1231971 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png index c8bbd27a1cb052869cee5cbbe8352bbd27c3eed6..5160b4f4caf94061f1a3f7245a3992ae39833e6c 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:529361259f74a9585563a44a6c592fb775d5057bebf582a7c7efea12e9589954 -size 1178525 +oid sha256:da16e0503849656a6589a22bc2d7ff2ffafefe618224e96ac747ece830831827 +size 1177167 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png index b9d2e7603724e3de0c2deccde4e27451c7b5e495..df9d338b73ac36d6d49c8cd674bd7f3f75620d22 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f477ed44dc09a5d75afc1962fa51c255b777ceb162cef0ed91f4330f265764d7 -size 1230058 +oid sha256:458b9499ed8c62016aacd646e05509b172f1e54d1b14259690909f826c5c11a7 +size 1225516 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png index fa140fdb08137b2a7eda258e8a95eda6203648f6..458c6dabe54b3845b11123728ef536bf5f455cff 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a07c89c9f50b6653942941a4789c23e668df21303e88eccf16a35563e8b1426 -size 1017061 +oid sha256:1aead433dab195287de478d66689a2eba6893f5bf2f54268bcee7e9b65bb4300 +size 786849 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png index b1ff8d6cc5d72d93807a285a845866a718c48200..febc5ed145b9d62b5b78d0a3c37dd592837b5329 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75c4453d4207ee2d84ce8693ce3cda6fa7a8e48d303fb15d344fd83460840720 -size 1254760 +oid sha256:574976f2f9b726e7cd45989394a74c8c8217178b7a255a1494441beb3c1304b8 +size 1356235 diff --git 
a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png index 13383d6dabbf5c8d8e035ed31ab6c50628d888c3..2f60ab37a2694acddec8c19258dbc4e092d86349 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b55e44639883307fb814a41d732d5d9a90bab7ce10425949e10f9690bc64ed14 -size 1533403 +oid sha256:ee336f547d973d42329ca9e78a67c9768322b3470d87c1936873de46e5c02dc6 +size 1516299 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png index 213e036e60c93429f871591442e1abf615605253..489146f05edddf3af495b9c0fafbc549a91d29bb 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0defcbd279618fab4b46330b4bbe156f05c8f31db28367b2465ee7a620f0802f -size 483168 +oid sha256:5d1858157c0a77750d3c1ce58e09fc7ec545bf9f110ecd1474752f6b94ac3a8e +size 524848 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png index 3d88d246be04bcc1f4e1c7188ab124a0b78a7f7a..2f9a6e8655a08203ad739a431d3f6b21391ad28c 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e69b83bd77d402758b3f0b24ff7ea7739bc2e6b8a773c7059f1db3d64566f9c1 -size 828726 +oid sha256:0f6a488bc5299a300cc4f520000917f8db5f7eb61e23f4cac43b8529cc983d05 +size 771441 diff --git a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png index db08cd8e5edece770274784980d1ed02c4c4ffe8..f43e6171dede45ed5e4883780b5426a3b53dfa7f 100644 --- a/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png +++ b/images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9520d813db7e7983f1d9de02b7fd517b3e93de933f4a29d49b0592358cd0e9b -size 1072542 +oid sha256:4ec23de5e22956a8d181a9552281a72b0ce0f3e248c67135593089553e8b3529 +size 1012672 diff --git a/images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png b/images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png index 717e7ceeb98a88dcf50b21deb1c14a587ab40162..d237767c9e8ffd7d7522c245c4a3080fc27fbc19 100644 --- a/images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png +++ b/images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b5f99fe1bcd67bc304bfe45575e3818a11e83426252030e41de6cbea7e10d58 -size 883058 +oid sha256:9589b3d7935add5f815af9cefd07cc17bd50f8be352e35615cbd03d97baef475 +size 1103714 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png index 5a7a686b0f1c1d7675b03d31bbd502ec289857ed..0cf94e31105211751b456febe0dc07130ee3f705 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:609a8d52d442d0a3dfb78b8bca7ebb0bf66df5559fc3142bf523a1620b02c6e8 -size 1215191 +oid sha256:d1cef4d1572ee796d55aec09c4415e5716f27c861d4ded3daaaea5843a6ca1d1 +size 1218998 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png index 29b010591f24c7070449e2187a47a56f0e1db623..19b358e31ade7225721ab44ff176519858756c11 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c94767ef271276b47b4e306cf359996be8ef819c84f32eeb4e8da158c77572d -size 1142090 +oid sha256:cba77d92840b03467f4f2acfa7581cfb7789519e9a21d54e07a8dad8e64b6102 +size 1892507 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png index 222e0551d920e309b6226ed3484267581e23f922..1f1ffb926ec81e1655871baaa30b4587cb1c1a91 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7600e10104587aa59761e0dfd40ca6f40f21cf774fda09300e24d653f7aea667 -size 720499 +oid sha256:e64f16b58eb0c6ad7f549f5aa885d01f34958d9b6dc9b85970fc704b0a9641e7 +size 898446 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png index 6e907cd1436e2eebe5ce38fa0a1a0a3a01b46c71..148e61dca3ff7ea5e31ea3148783630726a497e0 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08ef19e2c8987292b68d8746761b9ab991add5b39293f04e5699b9b2cbe0cc8c -size 1284417 +oid sha256:b571af95d7b2ccd958f47eeb846dbbf288db1927612e7aeeab4ded11a7ba364e +size 919240 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png index e26abfacd6bb849a24999372db64464140c516bf..eeca165f75ed6d4c69982665a6e4c6c28e2ee000 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae529a2c572eb59a6029f282ab2538cba9b490055c375f4a7ffdb698c3dadcde -size 1046965 +oid sha256:cdc4fcbfeec75a16557a271b49879aec97fe8b364980bdb4c962e84f0d9f8446 +size 1034347 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png index e6f1ae859a80de287e3dba1fcba1fe19dafe3b67..616866d4b3a08c54b9bfec69c52e17b2e6a2d6b2 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd9897e1d5301c7f06242140257e93ed694a0208cb2b293bcfd9cd5139676f3c -size 1110456 +oid sha256:1aab85a7a1827a5cab280a1d224093844556475a6a84eef1d41d6031c0191456 +size 1660980 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png index aa2c65d765ed2e76f708143cc9eb1fc51eb8215a..3a5ff029f539104445d46dac09c4ed4159de9046 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f9664bdf74c355c4cfc90ca4b981859c026ccf8eb769af629c04e5cb0931703 -size 1162249 +oid sha256:bf43ead52290d6f49d917e44e73facc0e4d69ca19ad5062e0187503888a9e13f +size 1397463 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png index bfb577bac68c5223ae58aa377171a3764f7c845a..6474168b7d8492de605c7dbc579d4d69e4857fb0 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2820fe04a8a04907430a908dbf79e6c04a861a127163f75cba16284fa5c37e97 -size 1325005 +oid sha256:3b88a617a60a5104a368a8395d0eaa1a4bc71d0507ec8801f8c3b45660db2c62 +size 1425835 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png index 9bf02f6281503150eb770f5ef52cfd3679e01671..ba73288858dfa25af7b4f8f42db5fcd6b27031ca 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b9ef707ca0eea14d511adc572da3f5ad256ddfd4e455f2719ac0ef2c52b0bf2 -size 1152922 +oid sha256:47c753a6c0219c4e1ef87a02c1fbe3803fe1becd2a4097cc7a4ab260074fd6d9 +size 1314278 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png index 4c62873b140d905e6a9ca8df1e1237073a2e7ccf..69fd399b729390bb073b6918e4c18f7469bc791b 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cacc0dfb9ad78b98b5a199cc3afcf044835e7b1e4440782bb31bedde6d56a10d -size 1135544 +oid sha256:72975ed63d9eb3d1940039e56a4a004ac7177402e362a4e1045285be1ba5e090 +size 1641188 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png index 5de1b0a5dce2c39464cc76b373fa6f1e10460136..d49a622c5c5ab721b67cee04f3f649533dc1903e 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4ee92ee22f5231e949c67fc83cc960e3d08792e0f914c02a10f228ce42a6676 -size 944769 +oid sha256:add9a39e5195701c0a9c1c8601b2f90688d92ae95187976ad3bca2a4c169235e +size 817401 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png index 0ae3c621d3a76e732effd80349641d667578ded1..13ca48326e138d6e6bb2200f92348153eedaaf88 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44f11e8a63d7c1c6c23d7ffde6b3de7e8bc257adfd334a31a112265a5e89b35d -size 1039806 +oid sha256:a79166250f0cfdbae62ed4d6aa5ca3f99cd9dad6b608d7b3635d9d141593b9d6 +size 1020547 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png index c7f03bc77b93c7d388e159c1c9b13528955a38b6..94c0dd2d5b8b6bdb86c1624c27b805064ec8774f 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b33f7bbfc3c621dd9b1203a63febed44c19fcbed138e545468aaa1c05f8ac445 -size 1143105 +oid sha256:97b97c216761c8a1c8c631e594512f69d8af77c28b1dd25b024fe59e2ff6f687 +size 1708634 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png index ecec59d2f5960e90ea4f4056cab72df146bb27c3..029272768e8f90d639f29768b1493eedb1ebbbc0 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c896dd1a339dbebcb8552572516b65bf38fc06ae8848b26de99e4c0060fb54c2 -size 833601 +oid sha256:98480b21a68b905f7a66feb7a86a9db092db35a120ddc3524e7d110e35aea912 +size 619926 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png index 46a8029ee006bbfcb1cf90df3ad588f468c3b364..520f7cc2df112af60f0e6653fd4737fcabfc4de9 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce57ee91ce15eaea903026746e7a4e25b05530dc2b0af63476bb6b628b9e3f07 -size 1248307 +oid sha256:79e22c2f71f1c13b7d09ea5b5c67cdc82f6164c04c60dc0a447016de996be7aa +size 986793 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png index 35b4492e73d781a33a004ffad01895c7c758d5b2..0873d02c76815e4f2387b271b55ac059db04dcc2 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7ed0f98812ece4f78089ab64d3830ced08e3a66df8545f8cd0fa84b92255f3d -size 1146124 +oid sha256:b3bfd409764f348ed40ab73eb6b8526ee104fcbb8005174a0346614e5ea49f94 +size 1000100 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png index 6e1fb3284588ff7e395f6f2b56c2962493f5722c..c0ca6660cba21604302103d66f9475951be4893e 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfe4245956d7b33ae1f0c51f5fc84687853eb99ac0cf0a61aff4da1a9f916938 -size 1142756 +oid sha256:294704be55f40a26970620b8079f4e17e46d9c888d4df8659e5f20d1b0cd0455 +size 1724996 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png index d2f5b7195055ebaf99af1802bea6f9bba3d42967..3b5ccdd478c9b6e2aa26a3c004f98c31a36091cc 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f417b1362eaa0d4b9a201a8c6b9aea4a1f59a4af7fab30884320d9ac84e1f741 -size 1235266 +oid sha256:17e3e7fedf2eae651ed4bb0f109f3a77d064a7a2054300ef87ab827a52807596 +size 1234470 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png index 2ecdd390d75883de18ef30d1758c89a94cfc5e77..5564ead5bf523848cfba056e8cb396fb2bc3d7fa 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c603e6f70d1cbcb5ba55b1cd78f0efb59a887d6fa3ca0ad91e5d9ca92e8627ce -size 796654 +oid sha256:72bd2f596305bd54f78a436e11d6bc4d3facd817273b161ec629303887b4632f +size 971687 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png index 414ba7456359d94f3a65b0f9e9943834d115e8fd..d52c6023c5865d4d7f9de5834ed96c64b892cdd2 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6495973378c7027f9fca0925243e9400d0eb2d63d5107497a6fa8618a26af1d1 -size 1111826 +oid sha256:829f4129684e71998448a01ec1b34371725135578dd11c9f232fa352f3347e65 +size 1293036 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png index 0672ae12b57011b26b6bd6d9ca8f10f16394dda1..dc4224fa83ab6566fd5bcbaf872f290314b450dc 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28b02b4afa6ad35d6017698955245c1fdc4c443b334bc4d74d0d993e7083ee09 -size 863527 +oid sha256:cd8845ec36ad63355bff8b3744d2003d42d867ca631f7659f8030ef88a751fc6 +size 670632 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png index 0980979776225c9f742181e4d9d4892c0b8e4d26..e524b6cca9728fd945c082ce06758eb3f36605f0 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:260596b966c8be98d1283884f752dc36b5de5f5c524fd8947e0c052fd7d6a271 -size 1215342 +oid sha256:7ad574597ac47390984e7d0eb5c25dc42be36537b2ac977025885c7e85ea1a01 +size 1758015 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png index 78e0b9da3ca048eca7548f84286fc6cc8f305ba0..cd1d0051963394ef2e1ead00355ea2343a9b76c1 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c55c16387c705470ce8e4b569ebd635feedeb127d395fed9cf1f344ba811bff7 -size 1123170 +oid sha256:f6127411e9d297503c20958e765615ee841ff2e23625264c6ed2832c2766c5ab +size 1114470 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png index 1be4f7c42e5dc394a0afe2391db8eb0dc4dd1226..e86484c566e3dc0929e44ac003cbd4b6b9b1c422 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c97a4543695ca2d56d526fc33c7586894b309a7cf4396236c9809595dd92225 -size 866269 +oid sha256:a8051fc9c0753f3a95680875361c0ae635eda79238dd1d8c26c60e1f56a66d89 +size 1414745 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png index 8cfa68452fb06bb71d019409c08170cbebb1164d..82d2851e4644e173a73df0bc3a58262ec3bad7b1 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a256d35dca161691f98bc432392fe296d18422d73a82e5b3e6bda178f4b355f -size 808392 +oid sha256:7a504d478e3d3c6aede0bcf5f02a7ea6d209c4cfaa84baa1a444dbc7f2647ae5 +size 1008089 diff --git 
a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png index d7ada9451d314bae2d7a5fdb7b5e3be62743ca15..cfbc9715d2265538e784eb56feb59466b0a2865c 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca29ffd687103e599f4cab08851c5fc3159edfbf94a320f416fdb17a8464f77a -size 1234590 +oid sha256:f5210fb43820e12f734b6f234e36c28e38e199d192a84601c64fd62e552637a5 +size 1171248 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png index 218eb35848a77f09effe43399218f608e9f90b89..68cff1d2660cf9e0282096d7c4de490fd1c49f6c 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c79dbebf9407ec7931602fc869e1331565aab87536a197366ae8e93b1f8bd4a8 -size 1206714 +oid sha256:f3863cb7ccb88c7a8dc5370165de2edb63829498bb3dacb0e1b097fc8db907a1 +size 1832378 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png index 2d01648bb31c89f71ce24c5672b247f3973f02e8..9875eda8203025e2ec3fdae828a0b18a738b5453 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e714774f31df30a670c7329dae249f006485af0b89f47c7c7ee428b45e7b3b2a -size 1788832 +oid sha256:081aeb70e7d8a1d73ce43b87d6cff6876a673e7d960f2ef2a883e3c066220349 +size 1470887 diff --git a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png index fb29d33ba395994028bf26af14b4978bba503e85..4e1cb9091ee7f33068d4e188db279e1d311bb63b 100644 --- a/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png +++ b/images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4ef4cfae6740610c4445332aec0ed634a6b78d5ab350a73b617b683b9242197 -size 1126051 +oid sha256:9995d24e183649aeef735857581246f1abc31bf4dd3c21e1ece0c8fe8bf723d1 +size 1382758 diff --git a/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png b/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png index b80392c506d1aed4e253714314742423235b1799..485b6118f90e2f246d9f866b16b5328cf8d5beb7 100644 --- a/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png +++ b/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:530b306ab5d5364783bf9d52d22a4b2cf74b5011288a4f0a53b459e06655a96e -size 861813 +oid sha256:de8703878f8f0bcedd62d7e6ca772486753ecb4764ce42a6cd4fc8112c301be2 +size 723531 diff --git 
a/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png b/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png index cf4675dd622864c5c51d93b066907cf9bd9d0439..872065e538238db4bedac077b0ba6d39347e04aa 100644 --- a/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png +++ b/images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27b4d603c46307accabe1b5f84b72d81f5f5725be4b4e51b6bce9a6f441bd9b4 -size 488565 +oid sha256:4b88862e0aebaa6abab7e6b86ab46eec4fecf36294bf3956b64b593ca619194d +size 272789 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png index 3e76a8f3e446e77129a2be42d0770f24c5661470..e21c927feb0c102a975446c4e94eb0ab1bec586e 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69c3de6e507b0a9101e4e3182e690e76d09224aaab5dd741f040aa68cfffced9 -size 1509688 +oid sha256:b5c27b616767ccb40d12cea077d9cfcbf09a5e9f83ae9ab9a15c2357755a7afd +size 1121309 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png index c1261ea4195f173d9d1b3e19bf860052faf17c1e..bc6e9469cdcf8dc3fd330f0566d441b46fc2be90 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77ce411568cd4f628e2bcba2f76ef8b9f2bccdff3badea6fa4584d6ee7d5159c -size 1542672 +oid sha256:8b21eacc984bceff31bd4e4f462cc0394a100c7a058d1a53de40d030ecceae1d +size 2454042 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png index ea731292a02918343359c50425780944eaa78a2b..a7be8c9c0d4983a28737a9afb3c0fccf04fa45fa 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f1222f668ef26829de9b3b6e4324e580304d42cab39afb3829ecd802d191c3b -size 1198369 +oid sha256:196304106ccf3dcb238c68c0d80b43358c6e99f6eb83c6ffff2f850966e268e3 +size 1407526 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png index b6fa871a87cfb22d359871a27cb55d43585dcffe..3291bfece81405bd375c570dafb0d9bd6a4e2fc1 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:953dfeb17ec2aa121335cbee3201f6781e18b802593b7f8a01ba03d771918136 -size 1101351 +oid sha256:a1eba1ed601a8ffc15b7403905b6734403958f7b41fc67c581df89789ad3fe6f +size 1605786 diff --git 
a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png index 793deead0aa5213ac7aa9c365cbae0536acf94c9..67c64473b3dfc2fef30022cc9f1b41b6c2164d63 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01b6f1f74c600a4dd9d7815eadae0978011f48f426d0db8d03321b968e8673ec -size 1238311 +oid sha256:c5f85f87fd44f0954826b0d058f2d9cb19b7ef550277b32b7b019bb57ac2de0f +size 2007398 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png index cffb543a6bc051a78c43035d02a86aee14fbed12..40646f938d7fd0c10d0de471a5e5871d8e40e903 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d427fc7ae7ae7edddccef23e349ee63e7e906bb050316baa50b6102820ae42bc -size 1452628 +oid sha256:09a8e37a3a9cbf39bc6aca97a871163c0b809e66df1223632eff408e0cfc1c14 +size 1737200 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png index 76e429603bfd180dba8ff1c453dadb8b4ba93c3a..c50a00000bd0fe1014971c455b4004f9b5ed8535 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ff37cada25f43abc9cfea29e7c1732f0758b76d015976e4a08e777dff4791c6 -size 1523014 +oid sha256:5ede5d5b4f59ab4a70d02ae77c56147c889fe37bde6269f225ba795fa7b4d8c3 +size 1591749 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png index 5158fe9dc1db802a4c05387d30b8d54370cc9bdc..05f06482fba8e93b707039f3e66fadf87bbd93f7 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e9cdf9f385a02c3715ff530030dc46ddfedc913b6990c6d9667856495bb5aae -size 1938430 +oid sha256:54132c94480a41eb3a316c4000bc6cdc8949b70c1b9e49d121469cf8bdaf06f3 +size 1597648 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png index fd87c158319964138432f6b1a188f48236e02240..754f7b6f35fac068697454e32960fb92ccb2fe56 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49d68fea51cb4eb48393509e4a8801d70d432b6995447c76c028667582229cf5 -size 1091884 +oid sha256:0fc3e695769642361d78d773d261344021949b63ca2d3ce2d717ad1aab33c5da +size 1347009 diff --git 
a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png index eb64b95e607ddc594481dae9ce0e614d86757be7..d6e4b4ed7545b1b08279ecbeff8c0ee30ec1c95c 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:341ba7dd1df20d44344bd011e80db919e4eb2c63b1eacb35ba1d977c37b3633e -size 1511114 +oid sha256:1dc5bfa1899115240f53dcfdb8529c3e5880e2274d8b53148dc43a0bc57c1080 +size 1547314 diff --git a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png index f5b0e9ab96b85d7fd960ac0c7fe1100b306d2d94..243fd35680e65456ed0a3d4576499c643ba00d5a 100644 --- a/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png +++ b/images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc33ea3580008d3cd9d7f8bd09679c100b514d2348172ddc1914e420377bddbe -size 1728894 +oid sha256:7ed9cfb79cb158a15b8e7baba6d2681e79c879a44ac6ad3bca4d82f5d3a99a81 +size 2426112 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png index e2b03d13a4ce56d3f866298ca81ac121ded182a7..cc2b050d95e8fafb1563ecea2ed3b59a4a34a5df 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b5a3a75f4ec995655d3cc9d0dcdb49fa4747804fc2219b6bbd5c3e26c12c8a6 -size 1235101 +oid sha256:d5c9b5329bb18cf45d5c36ef68da73bb03d633fe9d37d6a859c3bfcb764b7bac +size 867568 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png index 2a9e248fc5f6021f190c26040d571aeffecbecd8..d619c111c6f1d10f8b5f33cbcda060c9498dd5bc 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4eafee3401ab68158af4640439bdd66e7c0c06250e17b2f6ae43c4f70db3a0c0 -size 1246654 +oid sha256:82ab3d675f8e03c6c509e383519ea09fb8162d97784daceda2b7dad761b918a5 +size 1111217 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png index 5cb52a295f1672476cc3e0a192062b7c648f9b47..e890f49f1feb4250d84395233ba3d23dfaf24ebf 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e52aa18a808bffebcc01318a59f66f7a4dfad4b6459b7b5dfed1a8306ba32dc9 -size 1260087 +oid sha256:1335c46fbb1d1d9a617ba083e53fd401bc76b4cf4047af1afe5db0abf40bcaf9 +size 1327912 diff --git 
a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png index e2484c70e4517773ccd4285dd0e4d9b9a67a3452..f729d1022d1191601cfba186c4e82eed239a3742 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dae4d710941c898822c899b89d6ef39654cb5b1ab356d1e0c491868b2c7e3d99 -size 1248800 +oid sha256:64652c691c3f85391feaf51dc3e4de187954729c1bdf8fb33ca94c3e58ac0931 +size 688280 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png index d66bbdd07ff01daca9ecf2a7131ad942272c0b0d..2db9dc5d1985a58b15991e84ba6a9a2d023df2bb 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1946c691a86f443ca41801d4b861e207f6b5db70c6c2d86cf8aec6ae6b548fc7 -size 1236917 +oid sha256:cfad18a72673ff989a70f1cd4173136467995e8523e2ae84d481e3913872922c +size 1237267 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png index d5fb09f0ab26f4fa161ff9eb53b6dbbcf8975684..d4f2dfba59bd98853e3667d97a89c0d09d0e716c 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33ba7b96ad79f95e1f8481503281201a9f5d04b424f500351bb17a5c9f4943d1 -size 1256705 +oid sha256:0d674409ca1a9b8fa75ac9021868a8327a91389f6cecc0e2f096cc8aca05da21 +size 1259454 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png index 15772dea3ba0874cb7a726ed9c1802155068b6fb..524962594bdd2efed5ddaa907f8dc4b2154a3872 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:182fc78c5068b3aa2ab215bbe45ec4f565aecc5912f3f0b9625d954971f5aedb -size 1260122 +oid sha256:bd695f17981f318f6325ea9657686bf3386db99edd3b9ad8ee788a75c5593f75 +size 1342878 diff --git a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png index da0018b84d0f68f80d0b20dd7b36ddbcc6307e0c..cf8140477461d7ffd19302979855d3f5fc03bfec 100644 --- a/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png +++ b/images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a122cf32b162248c27336ca10fd6a04b5643ce7b998661cd0bf6661141fcda9 -size 1271988 +oid sha256:ac8812bc0587fd450d7fde4e474ee21d6eca95d5d27123b2ea572b5ff07cce53 +size 1355308 diff --git 
a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png index b7ca187bbe554821612531bf5f572ba0c6e4e0dc..dd055acab8fccc419459923f2d7a13c10886ba3e 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45ed5906ee28784f40a2ac6380f909c23631b2b7bdba783f273b13adcdfb365e -size 928450 +oid sha256:10a61338aff412bcb97d6bf3fcb1f3bd94b944901c3334445746e2d1cd6d282f +size 745649 diff --git a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png index b6fa1049bc487b07e14694b5f03004e206dbb248..727f78256a27e74753ef5bfab295cda551f5e118 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a52ac158381bd1b51d982c3a29ca4646701cd312859447a27b929f42d83539d6 -size 865169 +oid sha256:d3825b60ff2c7262d5f6020fd46348e3712894345821efed452499d93d67652c +size 192956 diff --git a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png index 3d37b1d765a589411e711adab1c2032eb5a20f5f..608fd490172fae90f02c2fa8057b14e8d12f1a37 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d053cce996fdfd7be3d09149d5ad141b13e81fd5577499b87f887fdb8e93ca0c -size 1119043 +oid sha256:3267ec1b380f1cf4da7963d5e7b51a73cc2bb8a08f8876ddd44d5677bc3e2130 +size 1599941 diff --git a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png index f9216ecc1fe3655623a98738646fdf42bb7ed30c..3a5ecd8b2c095bc65d24d856ebf5c0d0df625912 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8babea7ed4edf04fa9c15fb39d144709d69f5735a32af86d1390ebc01ee0622 -size 2497388 +oid sha256:4e443d116285cff9a2474db8fc7c561c3a03acf6d4e1e225cf5d1cf2dd1559a1 +size 2988734 diff --git a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png index 4460e5235571f2ef61fde926ca2680e71ed36631..2f14fe26b9f98c0350c1e6e0fc76e4adfc67bb3d 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03ab8594eaf322db2f1e7feac6ce490bbf3df928409b6fe9a60b7f3e70533308 -size 959964 +oid sha256:a409f8419c78e4234b21487586d2adbe0b0844cce18904b059028596bb44a614 +size 884966 diff --git 
a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png index f4f348880ff7b2df285550c08d8af039dfb1bb86..c7295b4e0e4e21f5ad9039c65eef07a1a8f632d3 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d796db6a649a8db3c7540d81a38e8b74e0d8f4edf825898f46f327f6f79c3ef8 -size 1012866 +oid sha256:c09574581ebdf1ab1f0ce8b023bec6177f49a9f9da8ca169131cb973c7389a56 +size 996073 diff --git a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png index 38d18eaa0814570d3796bb26c138ea40158c9cae..7794e93531afaa0ae6335e49b13c1e8646266f9a 100644 --- a/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png +++ b/images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4313638e88f3f0d6476a307db179559caefd546cc4217680ec3c6144293f59fe -size 963625 +oid sha256:f57be239cb92ea3892568ff2c87f9225e8eb4fd0fa94a9fbc2cf4fed844def2c +size 866727 diff --git a/images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png b/images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png index 868c34a5e2c8be70c7473e64fde4868946e164a2..64666af386ebc10f2c1ad8360dfc6f68813bbc7d 100644 --- a/images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png +++ b/images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66f6356eaef9c419b310673e712148998f65a6012cf3ac8df422f7a3a786db46 -size 1091967 +oid sha256:113a6578840b9810f58758fcfe84fee0fbfcbf42404deb8440f69a7315e0c37f +size 1036363 diff --git a/images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png b/images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png index e65702e517d184722e4e26ef438014a9dceeea91..6f6361732f111381183aa9bb5d1a415ff087d594 100644 --- a/images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png +++ b/images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:604fe0523feb01d16ef0a8f3ca49576691890fec3385862c4a1d205317173050 -size 1098927 +oid sha256:9be7bdde59d326a67f319948fc27224942d2b0cf17c9c3017bef76e0ae1c052f +size 846938 diff --git a/images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png b/images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png index 405d672337e543dc9223ed68c3a690853811ba17..85ef720b0cf9bb5ce3e877fdc39823f1c8dc944a 100644 --- a/images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png +++ b/images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb6ca4d951a24ec19927db47adf30507b4b9063d51e9cbfe4e4f954b6fd7f96c -size 2188145 +oid sha256:c68c32c02331c3277fc0c0026e4b0c6565a9f4b1b7f37353ced3a57011ac32a2 +size 2009906 diff --git 
a/images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png b/images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png index efc6b2c394e56f740e346ca1c92f2ec6602b6031..f0586e026ecbc5615c8a5af04e2c4371c6764ae4 100644 --- a/images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png +++ b/images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ad1be40c555ce2567fec7c84c17820b81e5b109edf668cec3ead6c6b206e7c7 -size 2178262 +oid sha256:9b1f739850a8cefa362e6434a1f17951c1365d9b1eab79d190b5d54e999de6e3 +size 1879795 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png index 60e2375bd6e40e7fc0cbbc7f6652d2314d55e44c..086ce8dcd6f5ab56fe8e74e8fc97354204312da8 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4fd004bddbb464765d507695e56a003515f3b9a21da269e7ec0b8179893a1fb -size 905051 +oid sha256:9e876f51611034d264fa75c79d9818ccfa2b902c0b013308ab4e6ade1552fe96 +size 963627 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png index a93e200a32871b45308d9739dd6b7f52058d30a8..150ce7354f8a5e615a7fffa3b619591ec2f952db 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa0c5d010f8197ce8fb6f96d75cd37d85e30f8bf0321b5db4921901558e5804a -size 888673 +oid sha256:030b73c24e96676ea4cb7bffd29b7a383bfce10e69f255dd047871b12193db6d +size 1425589 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png index f9377c160461e45f897883aa3a42a750ef3afd51..d0571270c66ab6d720c6b05387eb620cca033de1 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6360f0c17c4393b7a811775d00f04910bced6b1e7333dc1338ebcf231d9538ba -size 1337083 +oid sha256:be7bbe4c94bd12d5510232fc5dd760f50858bde8572d2c9af3543903d8298eab +size 1025087 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png index bc4b5230043841491fc9808b07cb4de86eb0aec1..68609d3905fe79474497327d231b8e3049c9d542 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04352927bd533bcbb6e40fd34551292faa6c6e3c6836b4ec70eb58b941b55fc7 -size 958839 +oid sha256:b5aa80133bfdae4056b86ba25cc47d238f7a240154a416fa289e445dc642a99b +size 1795530 diff --git 
a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png index 87b4b19f55381d4b7037c4ffb1dafb6bf92fc2ae..a4934c23b36087f053aa15824bec49b4832c976a 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bef50339ea82debd256a1fbbd6cb19570e8f873c899ea9f3d6520d2136cdf811 -size 813380 +oid sha256:05bac2d73fd3fad1e56a0a3b5deca9566145a3b8276a79fc8d56905fbcd10ffd +size 1787225 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png index 23c26b0d3df1425f2b30c5eed8b24b69662215f2..a2cb805cf425aec7655ad0354aa4fdc876590c85 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4cad8e6b0f4d8a7f750c4b7190e3b96351857b8733fabc497f0927546b3cccf6 -size 969016 +oid sha256:b9e799a2143fa49c710e573a0cc145b5d82ce280957cd47842f3b57e4c9ad3da +size 867262 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png index 084c371f0bf066a3868cd7d316fbec86585bf917..f0b98c30202f4a9af661672cacd7f20dadaefe55 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06f52dcb02e1358e98be542a31a03e215c7d830b1b93686bdc477d00bf40364b -size 1489011 +oid sha256:5c8f27bec96ed90c438389ae61dd7c689c6a3e2ccbcf2734ed3d3d3095c1735e +size 1222012 diff --git a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png index b1ed1d715268285148d36582a7c453e548ac0195..3560d827a178520b1ad1e3bf0b7518537b8ffbf2 100644 --- a/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png +++ b/images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:706adbab678c4c1524f84efd744f1d84b96291f7b7d0f9f5cc48e071cc4473d8 -size 805953 +oid sha256:4ae7efc27ffcdb50865dddeacca82a300e24cf3cb11af524fc14f5e2d32be168 +size 801234 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png index f60a68a03ef00f842ab6fa6d9cc22f978a1a0cdf..51d65b62ea98b1139a5043fe1b3cc02187a8820f 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fd3d0c8140387ed84784aadc1948c99cc89f4faca259e77aba38eb62aa4049d -size 1781747 +oid sha256:e544e1ff9531e9fb4caa5caaf5d25622dfb7e398687788e1a062c9fe7667e0ed +size 1226032 diff --git 
a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png index eda391c4259d0de85c15f7178925293a94f571e0..9124b54a79aeed000e563b9bf2d396ddc86c5b65 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e57b8104a411fc8736a0c4bfa1da3390e5dece4ff79683682dbfee5c8ae6aa6b -size 1611867 +oid sha256:172855ee2be174cc67fde90424fab5d0cfb39bf8d99615159e8814db9aee00c1 +size 2126304 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png index f79704e96b01f935e93a1e1973f25f7779e98705..cc69e34334603f838dd4be1f4f128bb1bbbedc37 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21439c68b8350382269688a5b5a62b782148ee075a528c8716c6b1970ce88abc -size 2212401 +oid sha256:08d0d2fdc4aee4f1a274645da63d6f3e0fd4e4553e055ffeb8e719a397dc567b +size 2468473 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png index 256d2f598e32f02220085cc906ac135a57494ab6..e825cd48727cd35a15c71b733a140b6a3048ab1f 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97c9cdeb1e2c56c671e6a9c247d4e3e74010123f22abbfd000e1c207a797dbcf -size 1930487 +oid sha256:fedd9e007b12c1333657e18d132eacb52133f9da4e3fe71ccfd027064b06da32 +size 1050758 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png index 5ee40cbd223057a769036bc3e4e0c58a6d4307b7..875fa949f30d2209ca6d0f1c7aafe514589dbdd0 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db12b72bd14152da54952868f085ad29fed1e0ff0728256edaad43d0733e98fc -size 1593325 +oid sha256:97dabac830592735046e68a45a1303bc1826c0c2b392e52ced3b26147a0e81c0 +size 2039470 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png index 5e042f2997470c56268d05b0efbf5641c0f0af56..ebe59dd7604a6e576689e45151a258cc6fd2576e 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5d50aed69c38985c29f96bfd6b764e5be4ec38ecc456b11f34a7e98978407ee -size 1879680 +oid sha256:3935b34459b229c472d22378db5079ab1fdcaa38cd8a610441c851d91d5736cc +size 1279983 diff --git 
a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png index b4b570a6642bb15d673c3f89c2e7024e1cf7dd38..5f63ed57b37dc9781932a4ad575a955adc7c8baf 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b72ccc17993c62fbba11f0064c29144efae8a689101e91bed82aba5b9d98802 -size 1758337 +oid sha256:e05185a1dacc6b5add7f04c7e1e345e22f41be5790be845d25058886f7d322fe +size 1081276 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png index bb268a9ca029a79620a301ce910ac0ba0238c669..f8d174ed7248984fcf7d362f7eced8b68380e1d1 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb5207d2ed86b806c648f6cb3ee8841a78e415c95b66356b47208c37b4105a7b -size 1160724 +oid sha256:bfa21153cf111d772634f9c10a09f087bf668136b0085a8fde7b8ec87d83b9b7 +size 1779416 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png index 1a3dbbd24788dcbbc1754e5a63b1ef5e6a457457..6d636dd2599ed9d1b58ae59a34410a093d056786 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1d3640c09c1e7fb0f9cbb59bf336c9e8bfc736b07f2698acdf2f6ce22946791 -size 1600621 +oid sha256:859fb1b5ba6b7669145cfd44e77c2c8e19c4b16882c32976f7ee60c1af4a1343 +size 1965328 diff --git a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png index 8d7a985bd7263c6a5a03af1d1efb95d12d7208f8..2f64b2eb4e2b5a611a14a6b57a33f44d89bc4dae 100644 --- a/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png +++ b/images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:982bf5fe49ffe3363a08012f01c8e14499a1c595c1e43322d920e0cf88b89795 -size 1765946 +oid sha256:28699a6c2c21ec629b70b27656a27c6d2f0b0502da1328b459c9a3bb2bdf4b59 +size 1881766 diff --git a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png index 7f0216862df0594f4e075bd72c3d63920a32026d..579eab20460044524a82eb767a4d606d39ce0cc3 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1efdb821bd6ffb7f51831fcc06425486920c4ac32e9f5573cffa9086879f2caf -size 1791916 +oid sha256:812477e825318c8834a37eef128c9b2a406c03c18e32ae438f9165b28b2a6121 +size 968634 diff --git 
a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png index fa537cdffe25fcabb5d45582d9f3331c885df484..d78e1c1421c4cbd3f8529ddccb7b78c539874f8a 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ceab20449c0a34d4b60d4c8e79bec2e58322a3d776d3154f5bb885a10c64bfb1 -size 1787149 +oid sha256:026138a30665cf902bb85332a726e57ef309c7f09c40b2808b590e625b956783 +size 1673384 diff --git a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png index dc93f4f40774292f8fc0d58c9f6feae228231b06..bae2223597aee99c3c9c2fd9965e3204293709be 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9254f0412b39031b904fb3026ec31949dfdb6af65059f557d2e9529e0794e46 -size 837272 +oid sha256:9fbe462da483e3bfe2461ade728e05cb66003695a21255c166a06e2eecabf544 +size 894871 diff --git a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png index 431fcd9fb0fbf4adb7a1064e0e504a1c209ed3ac..89024cce29906ed0ea1404b0c0e5473985c2bee0 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b597eeda8797202cc9b0dcf9aa430796507a4bc7363ecc07b672e27564e2afc -size 1167316 +oid sha256:9e847298049d45c92bf000c02020b8c453adf6edc57aae2f90048be42577d3ef +size 922694 diff --git a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png index ccaeadc4351ff4935b91d2195010fa2cabe1c8e0..77fbf95217f6777c4f4425ac29d071665546c1f2 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d53ddf6de426202a2799053fd582239581c2058584dc233d9cdef1dd0db232c -size 1279486 +oid sha256:f1c03cd979ac46725bc253831fcff07e32b83ad9d46495c4eb55a57e17c45ac8 +size 1348437 diff --git a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png index 0e05f6f190a9c62139d0b5ddd95ac14690fe7724..5c0fb4dbdbb61cc2ef3ea901d0727857dfd648cd 100644 --- a/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png +++ b/images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6fbac719a9a96d8de066c7e408571fa46fbcc30ebd8191fcd365151bddcb94ff -size 1408966 +oid sha256:4436e14b96033aaf84e659d04ad6536ef9d8d8a56c39a43fcd3996e08f161bf0 +size 1371980 diff --git 
a/images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png index 6799e792fb14c8d5252ebd0e2a03a930e35ab76d..5b2a78bb63a343e15e8efd6a2b0c437892570f3d 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d27e9cd0f687b51a272d2c62f86439abad548de00f9f40a0f9b17ed58ff891c0 -size 1493202 +oid sha256:e687e53db4e4069ecde84fafc0e894b0748bbe41c900520f39402f501ff437be +size 1632045 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png index 253348fdee67e690440646b3bc9ae7a31ba8e682..660195a22d4cdc05ee4c2016f93672b55961a5fc 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8692c1d522db8dc0a93a0c4ce3c6cced586816fe45ddd996e710cea743c8182 -size 1163822 +oid sha256:78b29db661f8ac353e2416f995b47ba75bd62baf84aafc80d148c92e12c90baf +size 1010775 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png index 5cfc59fd30e3a0c55f19cce85f15fbe25d1a3f0f..f6b078c4cdb02e49b997eabb971843586663d533 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32f82033bc75319cdb14666172a22c9a38f119d31a2cc779ebae1543106b5e1f -size 1518079 +oid sha256:07bda6f3b93fa686183b99082d7e77582d8f9c96477249a491d990704dd217fc +size 1353301 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png index e23af2e755d811389e9aaf868d8779e6e4c0e5ab..e7164418b2bc330e16cbcf8881639e20d001234b 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d379cfbf9da5734c1b52934c9b0c3766646309cb07e9f6ed3cebefcb02ece485 -size 1524341 +oid sha256:7a9b5b4f88308a5c36a64da6b5b2579c15d0086694790fb0f2a807b47a9e1e38 +size 1271529 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png index e6117198befb44106e6d3c8a3cb6a6116ef47beb..c55d057950ba057549b7fce19e40874a7f3bf1ce 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77f1fec4e027ef13855a2c0ef8d83b1354b011f2cbec3cd1d691868ab872f087 -size 1485185 +oid sha256:26fd97f6b0fdf53aeabf0bfb3c583848cb720a22aedfb05b82f8141fb785953c +size 1691157 diff --git 
a/images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png index ba23ce04df7373ac00f03ecd15f60766664903ce..eeddb56fa27b71474d83e44b7528e548140cc75f 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e99fff4224b742c21d3113fa65cbb4603f98136aabb113a761e22b9c688a072 -size 1475222 +oid sha256:e1c7d724194788a28cf1748bdc891a2a608858a6e7f216376692091ecaa002d0 +size 1149810 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png index 1738f292dd6ba3f2cdd38d7f64d9d8237c7c6e0e..db90568a7f4aafff626e5a1bb5d575cfb91e9636 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa69890b983e6e01beeabbb749cdb3887b50230dd987719b5240c497c5d0b175 -size 1160881 +oid sha256:8b791a3944a271f082cce33332e63d154c7f47295879ee0cbd8b711afde6ec17 +size 1130470 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png index 76a18ca278d7debd17c526dfa9ea49ccffe6bb93..f6651cfbf1ece67abec749bc17836280c2bb4af0 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f5dd6bc9d0cc09248921404a6278c0fac217656f8846970faa1d81e37aa7ced -size 1432617 +oid sha256:c3c810a5cdb0acda344978611c3e1c893e0b4d473e394134a76fb0c66f0c3710 +size 1075803 diff --git a/images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png b/images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png index 340eac1565074101560bd44b517e5b0005749e44..a4a7fdfe42aff6b99f2a5b3d7487c6b4d33c3dba 100644 --- a/images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png +++ b/images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d83b781844c75f37ecb78e91ba13617b506318f7f8a3b86e5e878352e7bd400 -size 1584747 +oid sha256:efd1df57a57e4bec7e8d55ab840d5f07880b2e6cd01f96f648bcb17a00802785 +size 1113026 diff --git a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png index a8915b36b3c2e15182552f6d29efd388e99b7b89..d784c3d6f547d629688945b062a22e6208456405 100644 --- a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png +++ b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5407115b9d2ecbea2a794f7caf2a25e90429e71e62d670ddb8d0f67919880bda -size 1529363 +oid sha256:e0910272ea7352a18bd944db5da76aa2da1955aca1b499f378d228dd2bb95e99 +size 1301833 diff --git 
a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png index 29c1ad23ddf9fe8376875600383b3a85d6d65122..0478e4ae26da210a9235ddf8564fdacbee478507 100644 --- a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png +++ b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e56c3d51cb13054db9ff82d012c9f2fbd86327f59a33c4099e0531aedc9cc734 -size 1384125 +oid sha256:3e3cd9df2dc3f7614362b2deb8f510c415fa4d9e1bc0244e92efc039a1d85e7c +size 1509653 diff --git a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png index 7220c6992d49e6f36e8e9d7e1ed009354b6f60b7..31dca034888d2821b4e46ebf054846e424661d4e 100644 --- a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png +++ b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5459dbf798e041f1397bdbfa9b27f76099dff044dfa82eaa206babbce06e470e -size 1622795 +oid sha256:f8e641b482f3eb84b746b55643c37731f0b0609e230715a716775e72c51227c7 +size 1281052 diff --git a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png index cefc06ec50f261bb4b337c69f047669af929b22d..f1500eb731529a9e82a830f8e76a79e6e023ee12 100644 --- a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png +++ b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94d514ec7605b1ef8e61384f6097e92718da1b9c3b1c54d14a79ce069301e9df -size 1194601 +oid sha256:abe0592b5747156266d69d11879ea36585109a0ec77df27613a3b9f08c9528d5 +size 1028900 diff --git a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png index 09be420e1c5f45f964cd1672578dd44f5f30d6f3..e6964addf5b6134b0d3332d42324018ffd507a86 100644 --- a/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png +++ b/images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3445dfb0d91707b916d314dc161240d9650de274de34bd3bb28f28b0fa606b0 -size 906191 +oid sha256:dae829cd9e1ba4ce31c7ac0ebf8e19e42e4f425ef1a69d82a6b347f48623ca00 +size 1466337 diff --git a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png index 82f90854b8a61abe031d86f0b7830babcb8ad4ae..8f2f47587c7cba7169171a3dec3659cf0c14fd35 100644 --- a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png +++ b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3551c68a1dc16feac3400ef9cdadbb3cde64e9e6a9dca9d05c1c9d02c2559300 -size 1612499 +oid sha256:6f08284d644509f0c4bb50b74a19be66b458f0b756476c1214c32bee9469f9a9 +size 1091780 diff --git 
a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png index 96f49bc63e1fd35afc24a681124b85109ccb17a1..f91714d2b261dae0c3b5a3a6cfeb6fac56177ff7 100644 --- a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png +++ b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d0bc3b2d3b86192a01f50e9736bc5cae49ae2de89d2b6454318f13f24b7813a -size 1656062 +oid sha256:74197c130b4c8e03ece0a0a68ab55fabca544497a3e21dd152ac4a6ba1aa7e67 +size 1486922 diff --git a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png index 2a414d426ad8b37019f9bbacdc3ec4207ce3c487..509f22401404fb978f75afead596600ea18fd850 100644 --- a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png +++ b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2946f02f604d48a0c3308b8aa11da859eff778dfad140b59c7e42bed469be396 -size 2699819 +oid sha256:c4ed75705270d0a8e895171e4eb8dccb54df8b28b44b47f7cbf487242b175090 +size 2646287 diff --git a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png index 03416a1c8d3f4fc5c6dd6825d1e211851b93310f..a54be4c1962e10e0538eab011a74780871a62b22 100644 --- a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png +++ b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51c56db24e2a198a7b14d32c8a432499787a63cce2243c5e784c30b1a6366ddd -size 1627261 +oid sha256:998ab8c1330ce17c8f276228dc195932331c15d88ace0e3fc771eb6bd00d57e7 +size 1917926 diff --git a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png index 6295628d8d4efc3099959a815e825e47a8546818..612ae95016b6377aa83c3925acae8aa3ea6ba6f2 100644 --- a/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png +++ b/images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04375f864f21e98e3e949b7db869f9e94938984bc65e034eb897d553bd1e3524 -size 1984365 +oid sha256:ae2db26aa7f37bc5957487d8162e01728bf39f065ead9019ab2813a6077d596c +size 2001857 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png index 252b2caf1a832b7417769f57378e3e7308940988..16e6332c14af2161019d021edc3e5bad10a12a39 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6146102824556c00dc8c835b6bd33e9bb51d19a6b5036fd5fb26484d09e8186d -size 1178836 +oid sha256:3422a639e685d7a1570e7e8b8ab3cc6c6a70d195e2472dfc7bb70399c97583e7 +size 1188808 diff --git 
a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png index 9d51f0d35be068fbf19339badf36a9276aa4854e..60b4c12de5b6f423752d3a72451c40091691fe24 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb63c8de8ccea8489321f0a50bdb580a8577174b1bfb94b26cb0db09cbd3bffb -size 1181355 +oid sha256:56274781d69f539f06d3953a6a514e0b7f3fee320612a8d08ee761b525dc1373 +size 1127737 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png index 8ff39385dd681949a432639064c0e94a643de13a..52cbc8de985de06e05dc6f9b4c3cf467eacb4d89 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:430fadb77787503a93ae7efdbfa6882f2461bffce642c536ce3bd3a496ab955a -size 1003965 +oid sha256:20b0f9ad398ad4eca556db50325cb4273a46f7d4cc140a07abbc10b7d41b5c65 +size 719023 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png index c50bd6fc32f3af502b7b53b6aabe474906824e01..2b051226c3031b9a4e9058092a12cff674c24309 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b12d374b93e1c0c156a68599fc341fffb111713ad1a5f053005582e961449d63 -size 1177846 +oid sha256:5b2e153aa002c1491350fa04f1f3d07c25c8a82a5ce87cd95a6a7dac677a26b8 +size 1460476 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png index f6b7e7bfd3aaa1cd27e1f7f40a1c01defaf0c113..b5b49fced40ca3379ee6c84d22d73ba109bfba22 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6901f93ddc6f9306d53666f7533493f1f2b6b6925114e4b81db17895645a2401 -size 1181167 +oid sha256:6a24ec394941165bf5a7040f3ffdfdfbb55a5ee0886ec22084f48f4ec8aa9243 +size 1557292 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png index 1c74d49be665397f7e57d846dc1c88c02342ec17..152d50109effe51b889a94d64b3e3e45f12e6438 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:172e44b47729024b433f76fee3aa500cbce8b3dff5043a33d5672cdb4b9ff99c -size 1128459 +oid sha256:fc8cad912d27671e62f911c5acee9e312d48bb019fc40a2d457b4799bd8061c8 +size 1382759 diff --git 
a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png index dfc1a2fc9488310a8dc2d0a0e393b27abf63a7d4..3f40682795acd6f8740101db7bf28a8a34ec9e4c 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5147113a2af410e5cb4d57a4ce49b28ebe8fbfbc40a0a535b380d9cf575a4a7 -size 1219483 +oid sha256:7ac9bcb616a6dc07db774e4fd3cd1c041a8252c860cb4030512b81e05b18a7bf +size 615904 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png index 99b6a26f5fe38792fd79bface4390c2f3b368287..46ff2d0d54b2d8f3466779414b99263d0272b5fd 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8143cfcad23fb01e5264ed7b9abd583858c32ac4d656083e92791756cc7f89ec -size 1194126 +oid sha256:0e5c80c9ae7e357783c03a20d844cd59baf21083417f8c71f48e78012eb950ae +size 957160 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png index 811bb09fa8b9b706d43162c0475b355e1ee224df..aea5cc76b8edefb4be614f12433a55d24073e010 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c2917088782d1352a2b592a1e8a5feed70ea8c314bcb838fd31239f075a7426 -size 1826330 +oid sha256:ae88e00a03f4d4e7a306bc7c5239eec2467dc902d1ac39d2ac70033a41e16c48 +size 1202626 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png index 36e1906944f3eb9dfc243199bba530334420618e..dfc10c9ea79c6ad786b7d20e857e6ddf5e4fbba6 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e25a5fd7c19dc02ad10c5b32e6aadf60c3d3e9fdae788119b7000a890bfb999 -size 1129697 +oid sha256:c6f42198c6fc8f6f7ae41e245d23fab065e1be6dbb4c74100ec24f8f786ef8d8 +size 1131204 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png index b4f1d44bfec528402377dc201bf321bfdace90db..f422c5e5fd4c69f23c01544e1393686f28d21660 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3d527170b04aee893d0fc9cdcdb2be92fe1e60521ef81b6dbe6e12c9382869c -size 1141618 +oid sha256:a010d6cba2e7c1ef03f9e4780b089b7062513f3bfcac833e5ac29558047d33ee +size 1184720 diff --git 
a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png index 835c9f39473065f48bf4fa1b6dc184a2b0f274ff..c077b02fc8f1b316c5488faf67d2b3c671d961fb 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99294716eecde74c4d8264682477b87a1a8fe6156bf4bfe2d5cd7df700db4f3b -size 1141494 +oid sha256:ed9f29288f8ef3ef1c2a21323857076895cdbe685f3037f63c128c0a1f0a16c6 +size 1346502 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png index 382e44b0dada669b4087b2d9344d969cea0b57ea..15de06ba8d5a1733a25911333723ea96829ae483 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4376d04cec1baebad07f1a75cc8c1478d0556a9e7bafe83633d9a21de17dd149 -size 1142371 +oid sha256:d4a9a1453982179a44db67e8ed41ca4c05fbfc05bf5249c3077bdad0812fa3e4 +size 1670877 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png index ac638ac2f3fec05334d1ff83fda15a270b413a98..b46ec6c1dde2e458c5af9689d3a80be2868fba1b 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:385c145c805d9c59a1c213e4126a218df4335b38ee16dbe315be550a24d8fd6d -size 1179318 +oid sha256:4cead2eeede1ff39f375a1f9db9e8c6f6eae4695fe2a1dc601c1ec0cc5933240 +size 1253427 diff --git a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png index 7bf99757cb04900c5a99b9e4b343bff9cb09cf19..e2aa3ac896d495d707e199ffb42af4f568b48982 100644 --- a/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png +++ b/images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33cb9f2de2de500ec207ecad74ac23893b0aff5dab18fe9f31b6a6aa4aea4ede -size 1169718 +oid sha256:9b1ca14a9989bdf03b9a251b4ba22aae695fa57d0a685f7c4d5cabb455dbf5c2 +size 1451161 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png index 6df3b952bb70055b958704d667ee2af8c7d6f249..41981d1e244b4c85836e553197084bf407081f2b 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b2794b3a9005856860e839faf83e02cfbdc341f15168e734c63c05042caaf2f -size 953720 +oid sha256:e01f4bc888020fab526c4b84c9fc242dc39ef23bd18bc198f066f92baa7bd254 +size 607681 diff --git 
a/images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png index 23674e166683f077d7e9b614af6976e6c2d25c79..d89d2a57c0ea1ed258d788a2d1c5afdfaf89e9da 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97acb5981a0c74af51dee6404d80e3d06478cda8ad27eaeed9c22673320ad6bd -size 1122587 +oid sha256:6260efa00cd500c06183ee9459942eb5d4598080a84936761e0e86dd34cc11e9 +size 1256632 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png index 2261720d339fa3775e440f41765851e96974bb00..682fb436ca35897c468d7c64a944e66ae70f6ee2 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15fb9f2b5f3b3b96e1702a93c5c992e5b9b8d2601c900ae92ae51984079c6053 -size 745375 +oid sha256:f9807db3768d71fdbe8bdc12b017d40f5b359f3244837bac169722a1e2eb434f +size 709311 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png index d231a52cb4b00c903bbdeb4b5d8ad9150662f68a..331df35787adfee268c652f8373872486d2e934a 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e41420c325aee6702f199bcba55d3626ef14f530705cbddbf90ff77c3ec26095 -size 1644957 +oid sha256:ddf8cd07f11184409ddc435c55a1bf05704ee439302bb15dae7add2aeb237143 +size 1917711 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png index b7341044408e088b5dab671d9619025fc989151e..801c115600b668a2df0f750b2816a899e0bd231d 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbc53e412512b1829e5b9843e5141351d20cc47ad34c6efde24ac6325f8201a9 -size 974076 +oid sha256:a31d405771a98b8be5b85cd68e5f7ddb517303bb498c10ad4f4719e83bc4f53f +size 991628 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png index 7408ac3207a8fb8c4c6ae2e79adbb3009069bf0d..c8459e282426e3ae0d5dddf536d78be60161eff3 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8a127fb7be98f8db7cfb9250aa89319f57dc143966d21b5fd7e2f50b89cc32c -size 973901 +oid sha256:3fb2ce78672bc86ae7069d70194cb1c9385118dd709775dc448233fbf43620bd +size 1117748 diff --git 
a/images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png index 98588118e04a413b695c27192c067a09c69fbb7c..02a288acd0f9c098edf6fe76ced844da6308610b 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05b8f4275b41de8307876a47636a7f24ba63b40f706dcc659c42a42508d01b16 -size 1098791 +oid sha256:04398200a20686cbceeea9e6d754620f882b1f1fa7cf0f21fa6e5f959db3c12c +size 1561070 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png index f1466128b4417eed879f62e207e95c1686666d05..e50c3fbec9649f6bd0d65af21e9462248be3237a 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:925d3278dd448cb2d6deca018eec9fe3787b1da8d41fadab0829af9e040feb6e -size 936324 +oid sha256:1c7b7180e5e0f7ac95e5e671b50124a39d4e073baa5c2e322b6e8492090c2039 +size 626522 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png index db57beeee16a6ab3e25f51d44d11c465b3446321..f8f47aa5a3a06037b44108986ca4e71aa44a9125 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13a3640c0f8398f712019e89b293998becbe69895e6d30d3c7633588bc97c2a5 -size 1060297 +oid sha256:91c7429b79d78eba598a9a7a77034d3da77f86545fad276100f50a2c61459ff1 +size 1401445 diff --git a/images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png b/images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png index 970244a0e6fdd367115d916f61bce0b3b15ca215..d6f7883c86ac8282c999459e7a287ee5a7c2a959 100644 --- a/images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png +++ b/images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e168fad2b32fd5e6b369fde6323ceb9345f241cda3a5a664a525a3e10136ee0 -size 1324596 +oid sha256:9ba1be475652e7961b3cae88ae1a35305a38a200699d9bbb6fa1850005c39762 +size 1531646 diff --git a/images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png b/images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png index db76bc271c80d159090fe35c9da7d4b915c7be52..bb5c86301cc2cbd8c01c3446793ac54e3009ae11 100644 --- a/images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png +++ b/images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2179f5ce9729e658e0df3dd74dd325b4bb3bbc0751e38e9cb97fcf89f7b42e4e -size 1409361 +oid sha256:b74b4a2fc1f3e57870441e05f55725590f532d62fa84849fbf60b79ad1fe7d05 +size 1322285 diff --git 
a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png index 4653bd96ddc802dff3750b3972212224ebaafe8c..2eee4b3ac77b0527a926f744512f158c7aab0b1b 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a0b0803299279a7ebe094b884f36449d11c137c6b54d6c51e6002330fbafff1 -size 380757 +oid sha256:134a1c5aa3ff11b8de361c182865463504492daa3c28f8a31ef4245371b94f0a +size 358587 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png index 50f5c96d06c9856fc5c9aef41cd59e5174baae1c..046b84f37c3a5416241e6bd318c3e747b2eb4821 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6cbaace0c1a5f97b001c7066f3a956d3a3f601f8a27da10bb1d1f897a111620 -size 952743 +oid sha256:f00e6c1cb56eddd51258b93ced43846c3b2b069c840c912227c72178330158c9 +size 1097073 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png index 11673bd7b8357f857d5208787b558f6eaccfff61..0026ef95fc14769e53329336279fa927125d5234 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9451d72a1a2dafa1dda462a14024c4cf0ab364d46a2157f6e3c29cb06c1581d -size 892682 +oid sha256:312312acce9416a8d25f110517c9b40c52cfae2526e3f3397f062ee58914d58a +size 833366 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png index 74c0e33a850b4a8eb347d088f4b89cb604eda440..5582c82aa0112e03515d8e783d96e8bdea031891 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dae182658c47d03889790491d5f811269a990d27f1109a0972fb28709cc4fe06 -size 381243 +oid sha256:79335323065a1bc7ea5aa085c79fb067dc4e0cad4f0dbe3d5e520d6caa9c07b8 +size 304732 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png index 99d4d76720549d3bff3d6d81b07926a86cd5ad23..5fd81624d4e58b5da35bfb9b4a16e2fd7dd640d2 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:503c6e7c3c5bf94aeaccceda9b8ff828def46ec66edea48e9d891ec4fd663987 -size 379776 +oid sha256:9662e87e995ca829d8f9ab42f3e835b1f78d97aa51aeb461f685384387171c7d +size 240854 diff --git 
a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png index 6ecfe94ae91c83226ba2d57984e135816446eb24..067fee93a5e97e8e886ac8f396a821605ee892f2 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4677cfb0638a49f16bff880d9dc068ad7510b9c2732e36d68e7e50e917e70500 -size 384612 +oid sha256:3197ebecb62b92841fac0e57413604b940326aa1f483bd818d603837757e784e +size 227671 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png index b93988e0a3e2a4b9783f513b9406607d5f3900e9..cb75425ebd66d0b10860bee6ec770cd9be7f7f54 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43be1650f7e278622ef932ab7e8f0c327b09bea749a19db74d23479e2bafe09d -size 380744 +oid sha256:eaad16e4c872ada5199d322e62882fcf78c20a27c9da820bdc11cc29992695ad +size 301503 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png index 6ecfe94ae91c83226ba2d57984e135816446eb24..c7cd5c984998d52ab9fee1a4f5de45e5dc470bcd 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4677cfb0638a49f16bff880d9dc068ad7510b9c2732e36d68e7e50e917e70500 -size 384612 +oid sha256:aeca7fa51a3e60643b88de8c8ca0a971ed9940278bdf2e8e418418a0155a1926 +size 332559 diff --git a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png index 7bfecf35627ee14b0f302fe497f09d07f00fd8d2..255faa016ab2f51cb64435b1b6d5bdf5e5834173 100644 --- a/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png +++ b/images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57e8c5471b3a9023c494dfe14e62d645d73cc02bcfd8123d01c08cf39f47b4cd -size 380848 +oid sha256:9e3c336deb821db3cc761d348ae664d9ed7b3d3cee75b76ca199ded40bbef864 +size 249706 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png index e638718ed1b7d9bf5be9ec29b65bbbaba3aeae8b..c00ef28602f77ca70dfecf1e92e65d2644bf97ad 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0222d684410e8a6880676ce0fcf76b5882e1b0be4245881769f64d90d06aaaad -size 1561083 +oid sha256:5967f283605e9d6de1b8300906bc21e91ecc6af00d45a71e12d4d332f5b35199 +size 846739 diff --git 
a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png index d8803b949f7232b8dd5d3740886d96cf01b676f5..1e99835c8258b9fa0d16d680e2e627c3491a9f39 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6fb2dd16ca584ed5064d32c4d9e1afac9251fdbe8ddb16fa429d74012b0eea8f -size 1165157 +oid sha256:247a772ec6494021d0730d0dc49925a7f126d3aa2177a1229bf32f4e22238016 +size 1217838 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png index 876795522d7f57e4e97454a353277dac427eda9e..a70b45f1a1904d939bf6e27787eb2c151726436b 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab98ff1d7b37924a3aa0f31bed4a1e9ace3cbde3062d8f15492075696b15e93a -size 534125 +oid sha256:3f5fb91f1d8327937fa28113e1de86732a3529a5dab6a3414605b225ad6be4ac +size 443603 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png index 19ba87c6c5edd3840c5a7ed6a24dceca6a6fd378..3b1a8a97c478aa197423ddbed7361de531cfcfaa 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:795b37d3a806a683e71e3f3ee3108381982a6ea1887bf73ff64e4c2202b18d36 -size 778707 +oid sha256:d0074e5e012918647fc033af579cc2137f6305316c55a8495ce381c4962d2d54 +size 1148299 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png index bb764d4b96f0d14ebfb203956b4561aba913b3f9..09dfad4cf33e860425219bb6e8cf855d5c2681b5 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a46491543c8d3e6829baccadd3f2502c59864810a25d722fed5b881d06249121 -size 1654357 +oid sha256:2d91207d497f9ea1853685305fc5fd17444cbda50db98a8e6814a5adf41a67e3 +size 1083300 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png index 653c4ff9d582e701a3820bc8c75a537876c8fa33..eeefcb5e6baa593aa8297f818952364e7ba49c98 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f398900203a0b0fd32234b64ee380a23ccb62594677c2ba072c3a750b8fec5de -size 508525 +oid sha256:066307d0357f6c10fe726af953dc172b8c34516a51063d3a66dd175793b1af32 +size 910839 diff --git 
a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png index 1095938eeba318b6f0a0f0b5a27b6b48c0c3a7a5..07cc0561116ef073c7212c82cbb3a11dd2a06535 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20c0eb4c739cdc4122b61d2bc0e000a0a184fe2e1117268a1370f113baff133e -size 570633 +oid sha256:4d7e95eb222fa60be2c47acbc85c88c3f9f128a8703a2c0600cba6d1d8eb4ba7 +size 969450 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png index 07f470deed0d10e6f63cde5dd837ce77ff6be92a..238894765e9fe553d90b0d04d6e4a0be8e93c284 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6c74bc461ba30d497bd405dd4c820b3906fad05e7d6104d348a93ff91220588 -size 587950 +oid sha256:47364ddf71c170cd51c1a39929c3b269900bccd745df2d23362b198e5ffc9e06 +size 990031 diff --git a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png index 65a6db9045f71cc3410fe927dca7efc9b2902dc9..eb7cdcc32a40bf7264030d2c19cb15941f153a88 100644 --- a/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png +++ b/images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03b2f53662043d5669899d7a6f9176666f85f04668ec5a16bc28eba5e7463fb7 -size 1186810 +oid sha256:540fce0c21345ca08b1ec9d042f9200c450b6cbe55ef07b69b7ddc328e3f3b24 +size 1309403 diff --git a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png index 3e77febd17e6162f1d8ce942213e65b22f120be0..59ea862032958b680420dc21dfb9f1d93dab99e3 100644 --- a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png +++ b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d01b1100b08160d3331d2951e61c306923a6c35a2657e3d9d843b31de7d7d407 -size 1454643 +oid sha256:02a9627b83f914b377605003a01660a30dac6d46740bf9041ea3b2a0b8985664 +size 1135941 diff --git a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png index d74defc265d2c2ce2c2e703fe088cad5e030ba2c..a709cf89a63cfc08226d8d3d44e79a5b714b6f28 100644 --- a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png +++ b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d22c2a036c91d23926a90730addcbc7ad653277a1c2ac1a383633ce3d1caab3 -size 2364869 +oid sha256:44132b012e5cf14a917b131f7cf64f598ef70c8f3602f018120195e893239190 +size 2251111 diff --git 
a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png index 560fd199b2e44e4a29f5fdf147478c843455e973..87ec48cb58ee6291b8787fc80f0d2c15ec992242 100644 --- a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png +++ b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43f010b5681d24509c35c3e0d994b7047523b214595374d8e69f5bebf8e66d5a -size 2218598 +oid sha256:cee370542c9d0d097cab102496282fd5b0ed49a8803587f3a6806ddae47d9a5a +size 925855 diff --git a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png index 3fe533285ae9a91d0d526f28f0e688bdf6628ea4..aaeee3c33c8b0c8a5dc7df996791a9bd48330563 100644 --- a/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png +++ b/images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:872d4505e581df33f01781847d1e1d4c87cf3283abc603895faee4f0412e3386 -size 1244732 +oid sha256:5232ff4dfb81ff10ca4964a72d853ba3045a34043034ee8f2e444ff3836262fa +size 1244432 diff --git a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png index fbd4378d07be4713482781ec74642fec20066ee3..37a3e9f7206559f03cc2a2252ef6a406dda933f8 100644 --- a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png +++ b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe5ff75273f1bcd34b79f2c41cc6d231d32ad7b0f6809ade73a8bdd6950544d0 -size 1045850 +oid sha256:510ecef75ef40372c815fb09f2eb9830e5fc155756e69efc157c84fb433e37b5 +size 833314 diff --git a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png index 957b91191be8a75f913fb38c58febbf85830540f..616f3d79f241f0e2f6c8354f0aa9544a698edf56 100644 --- a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png +++ b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ff7910a4476f511ad1802a56c3bb39682f5818ecfb630aa5bd2a3ffe5dd3468 -size 310737 +oid sha256:a3732f2ff778ac3e6f218e6334370d1e33cfff790b78e10fcc95691624749d1c +size 287048 diff --git a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png index 00f7062b913fdbc66667e9e48a4d377b5e79705e..d7e3e4fd902b58a2e2f20701e343dea8f4e3d8f6 100644 --- a/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png +++ b/images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a4c8bc7ce731a8d9c6e14ed084c345ab85b85f92a055a685471742ee84333da -size 366016 +oid sha256:f9e52f30837e1e532fe2cf65b6b655529dd739d37f9ae63591d53554b803ade9 +size 1154033 diff --git 
a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png index 20c2a03c23d37880f4996bce903ac2e4b996c2a0..307060f310fa06d603c4e22302d36ed8ee105796 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04c208603c7f0cd34cce4f52a2cb16d9e506472bc48eec60f705ac9edcc9ffb4 -size 477110 +oid sha256:939aac6afe941e15b599b0e667c4a74a282e2f42cea26b0c929802a98d555e80 +size 408284 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png index 230cf779616164f06c0a1c2fd34de9b4b1f00d4a..adb4e671cb8f0787b551c69506b175f714eb4977 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be304400bf1c163fabf1580f3be01deeb846f894bc82c5d26908887623f2df6c -size 477031 +oid sha256:8feded830387cd33b92b038fe4bc7be99e92e3863c50b0fc646d4e4230e69d75 +size 477192 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png index 5d1d0910ca39842b71c5e48ebcc4b47926947a84..ba4cf69483288755a59b7b934bb2a793a84fbdeb 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98200828c1dadfcb882b8b384b3a8841b5fb14c71141427a559b5296fbb260e9 -size 3223430 +oid sha256:ba4c6a50ba938b54ef206719231cae1dee7eda2113c7261bee04d8d06d0e3692 +size 1195855 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png index 02f432842b5269565be0dae73758e4b234ee2d14..1f75948535a7bf7542b5277a24bbd87f38e767a4 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1780805535fb5c23e499c7927edc1ee0941c1cebe6a31ab0da6ac929c3ed9864 -size 473222 +oid sha256:9d21fbf946c9137fb1e44718d4eaad6833d5aaa4f91944edabe1ebccdce88e4a +size 912286 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png index c778c516f8dc774f90a2612b883d1bafce0df7da..215d760a4981ea4373d6eb6af62e76d2e5e52c08 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66ed52925541a1ec5f50c6c50ce2488f3dcef13a0be8a521983c9ab83a0a9ae0 -size 2250488 +oid sha256:d3a4a086317cf8e45c9335c98a177e4f35ba09c32e6301ce55a3e27b3b24795a +size 632555 diff --git 
a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png index 1d8eb12488c2cd779ac1ec90c4b8ca62f7514dbb..032fba6aa49cfe4f039594245d2e815eee6e541d 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db85456c63e599250b035a45556b0da5f3ef36127359c844b3acaca63c9eed43 -size 470842 +oid sha256:1d88e4bdde425deaa554800185a36990f55a56a61a8c2aeb5d26749a7c62acdd +size 812387 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png index b33b650f830bdea111621445302f1877ba0136f2..d9524d9bbcf491aed62918040b52e98fc100a4e8 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98ae8f0ff20aca4e448b6a3db55b45cd26bdd25f76d70ded93eae6a4cab0363d -size 1909686 +oid sha256:cddd1e8c6237859d150b6a79a2b0dd52aaaf655dbc848000b1b2de228bf491b2 +size 2277482 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png index b58f9aed2938038057f91e04c17723001ac13bdc..6ed054d052a2eec6a66ec70e0533e88f7d1715d0 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e0057366e3b5af838aa64a583e907f9b74cba0824d1a2c5d145e9119fd2715e -size 3152358 +oid sha256:ffa1a739397804e23dfbcf14c5b584ee204de0229209af935baa86d45c4dd434 +size 1069212 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png index 3b7aa58084f64e17a968cdeb32a78f8b4e9674f3..bffc79c695a3b80546f647e60c66dc9ef24c7803 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:613f3f82d23ec303e2566de4596eb48c64117f98cdab09f05bd2464266eb4be3 -size 2058812 +oid sha256:0363118ac2fa03a9830f93ef1a8c0045da4aa2dfaa09473e75eda8a5becd02e5 +size 1944388 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png index 03b6930b69edeae467eb4f8a756ac440967980ef..0213b6e7a0bcb15eb0927c3bad9d7c88041e31a1 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35e96e6e2375fd5aa4854fee97162adeb6a33683a95b7a20cdad2caa8502bef2 -size 473573 +oid sha256:6a2209d54f62fd710fcdc2170882985b40938e2879f7c9bc4dc6366a1def265a +size 642573 diff --git 
a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png index 22c31f7b6b3a749925b4e91231b0842ff390293f..39ec89d44b3eb934a7f25bb5c196f4aed1f17d63 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:323273ef46ad18e96e4330bdfdd9a2c449444e9dbe6788aa60b228a6db4ccfdc -size 623483 +oid sha256:499c34b75ca2e4599437ca1f0a06d245de59e2ca449b173234e341aa677e4750 +size 1129321 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png index 5b708bb8e3db6e24383fa84a013491e7a8f6b1a2..50d92aa1253796e7cca8a59f099b990b3e5947fa 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3f2f989bca25710bd24cfb6c2adc7ca60e545ca056be1830769b42b0454e75d -size 475624 +oid sha256:4a78dc734357237fb10a0b335d1c23ff8fc2acbac406e598278c977f26c86d93 +size 711542 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png index c57c233ea49df85a550d81172e55726f9cde758a..7f18bc5d2e6b597e13307b20d211d2979fb74673 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91b8667cb868ce74e82840739af40f160cea2b07d99eb24083b986fcf1f0365b -size 471468 +oid sha256:ddede67ac993b45df41479541adc6d4e08207fbf6c4c512a26d7d142c83a602c +size 963334 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png index f30aff629f49eb3d3e08268039301951b6ebc67e..d78140cbb8a8a985eb9b14691d674a384eaafebf 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af237a29fdee988b9fad6e165a58d33a619a39fa20a1727216be17e3281c0771 -size 476594 +oid sha256:2aa7ace1447e9aed5518f73ea0b6f37c1d8185535afd7d6b0ea07859d54cf893 +size 1468138 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png index 03708873658dd0f37c9a107f1cdd05829f6b0c3a..fe2883fe0581a8f90a6afe7ebaba7d4fef1d5f33 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b643a0cf376b4b69af185753ad964b272b0b09dfaa584e9f41635f6dc28bcb7c -size 1147091 +oid sha256:075fac607e6a56806d043f0e4474af5e7f0a8d45233179d6ab7813a5c3b31091 +size 1854070 diff --git 
a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png index 9ab307e812a2df8c842b141f59bb0dea121b7c74..c159e15d79c95e3c3538c57afe9f7e30654aee9b 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d097b91948c156366cfd72b216229ac24c9d541f6a53fa9f5a7c84a6b4cf9baa -size 626948 +oid sha256:ff02900779467d1d237c3374b319c050d7e0e4e3ce8a853c9cde434626f253fe +size 1489833 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png index 1238d09b1c7197eaf93c27a8b12503497239d1ce..48fc874216efa466ddca3115af3dd2359178a88a 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcd576f706414509c8a48718533fbf5f543eb0628a074a0cd05a550b093f0e0e -size 470413 +oid sha256:66f4327d576c996186af3218ea202cb62a58957f82773f7a430f650b74d67319 +size 1256140 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png index 6ee88b3ab0f62bb027bf56b0ecbafdcf16bbe06c..4017a682f6d81943c1161c842700644c2bc282bb 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b996abd922fe221ea55f2e923187276682e08edd265fc80e07e66cd97b758fde -size 512315 +oid sha256:9c8979c4c95451647bb6c3b1df02fa412e053885c9ccca53b0ba5866d21ae319 +size 1530881 diff --git a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png index afb64760c55709c14ee4d76c8a13955ce7dd491e..7f11f9ee44ef3c4b413ed9bc04adef2e28bd4a83 100644 --- a/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png +++ b/images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4892c55dc5a16c16bd468f4ff21da553e99523984a061b7ba9b1686c1878b550 -size 473071 +oid sha256:1ae34bb6fda0c48c158c94a018aff11fea4d7a7a9e996aa95c5b6d79c2a9a1ba +size 867252 diff --git a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png index a041aa33c3cf6abc52b49ebc3f2ad046b455e5e8..7fd4768bd2af4d45a58b97e3ebaff4666a09e951 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65ed26f33c778d18795c40dabccbb24f4aa3e32b08b4237f70983394719ab714 -size 1233937 +oid sha256:e3b5a3c26577abd54cd463bf8cb02991a7035399e9c796753fee63db75811a77 +size 1031811 diff --git 
a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png index 5982c81d44eba8430769b1ce5d1e61864d2d02b2..89735ae19ad047e11cd05ec23c6d0f70c73a37b7 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fe10b0c511d4c9f8eec57053bd32481a05be30c92e2cc44e83a52485ed354fc -size 998250 +oid sha256:620b181268b6521b37192dffd1b10414fee1011fb3dd6d4e4fcfca1458d91015 +size 2201305 diff --git a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png index a5a3db404ee011f7caab21b89d67c82892495b49..15557a916e48aca2319d5da0ceba26df55dcb5fd 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:910bfe457557a1356e9efb64a56fb19909b2d8ca7e3daa4930dec5ee2e4b666a -size 2470639 +oid sha256:650f4a9f402c62642b33aa1a166110f2b23c30366e7a0762ab549457c490329a +size 1183901 diff --git a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png index d24a7a478926b59aaaa4cef9e95358ebd6424987..2c2056f2254f3ae064c8a351f87d2d96c95f679e 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e603853edb4db9f2188542a677214353ad73082a2244a510e2159244237b4dcf -size 1566092 +oid sha256:bd676538389c10fd2cb3e6630003860c43c2c6be7cf1e0f648c8c4375f2eb9fc +size 1252932 diff --git a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png index 5c3933738167285ac437a184150bb0945f67902a..ebf676702bde55cebf31549c818d964ab9c80ceb 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a957fa36a16b8f8587683b7c8564f22b4071b228b009d524caa7a8db2deb635 -size 828818 +oid sha256:9cc0a370f8ca9e13e1879f200622706f6e5e95b508710da26240be7cd2ef0eec +size 1213784 diff --git a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png index 7b6d4824b6893a62103fbaaff7640bdc9615eed8..5908e2a8184f33a372ad5010d266b6ad713c373c 100644 --- a/images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png +++ b/images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df5d28d0d15b1583c2c02eb603df1f398fe3d103b9fdcc874f3e0c7c60116b35 -size 1643035 +oid sha256:23737437efac4c92b7765982bb915e7872c0d73c6ce97f043fd19433ceb523d8 +size 903910 diff --git 
a/images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png index 6e65b569b96af47fb76f3e8069b7bcc080f6a3ac..07cd360f8a2550187156ca16f4c442dcce8d74cf 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39b5eca81e07c35a76db94d79482fc89f3dac720bb87e8f4c8bfeb6992c3381a -size 618843 +oid sha256:d9c8e267b3e95600d841ac98124d561abc4ae4f05f597e3ca138fdaaca96a321 +size 712381 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png index 72ed0cb2a06ab5eb4387ac89f9b00b93f32fdabf..cce43ccd712db79ff588aea57d868076e382fa2d 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aed3db0271758092344b0d6a51a5c6534bdb04378c7397b59829bd779d5251ea -size 443548 +oid sha256:c5f8922f0488318d3cac943979e2a2dce66e7721fda6bef94312d5e9c84dd223 +size 167950 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png index 506b74f0c6bfec76d4eff37e628d36e57cde74d8..0b6a31aec8f85dc1ac8364483685a4ba8d8a88b7 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8fcec133e0583d873e512cd0186bd7d963c7e3c6353aca17cac0614e7822a9e -size 471607 +oid sha256:9e38c997ceba8b0aa5f11e3b0c2e69af0d4927df200d75b03f71ec37fd7787b1 +size 440697 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png index 8f48723d0d4eef0d83fd3588cd0203e0577578c7..92bec3e18ecd1020f46617effd9f94dddabd7bd8 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d88f3d72b24028d1aa157287d8f82dc6b82482c559e6e18ed649106d98aa6f07 -size 332477 +oid sha256:4fcf7782afc0b7e3d65e5065b7ef0927b68ee6e887c138869b204e6248e210c1 +size 333131 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png index 98374e949b53eb8c774a6283a26edc78afa8e8a3..fe3a8fa2e49cfd702a190e42acf12d64511e3897 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79405e24b7140d908092faf0e22397866d00f6d6268a992d4d1e0e58d33bfd6e -size 529265 +oid sha256:138f026cd484abb6d1180c3900237586b93728acd8f9d9c6c8baa115e5c96de2 +size 704973 diff --git 
a/images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png index 41ba793181e085a7c491ad741f17cbb32119ba38..8dff1ccd64d6eaf24823a3a53cda82a545209900 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d68d24e204748c8050cc175cfb46ad865504416b3cf8fec503145da59c17b1af -size 832245 +oid sha256:45a921ce07073843f9d176bbcff39765d8d820372c172a827f1dbe13eaa9c51b +size 838477 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png index 14ffe5828e7af02a6543a7bed9d5195a278244f1..8e04db53059e43e32476beb903ebb57bc746cde0 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b778c3eefa781d29711c28cf73f70400ebe8e069ccb446b135b5f929761ef6f5 -size 392273 +oid sha256:fba93a2c6a01de8375201cafcc7cffb6b5f221c970a6bd154be24a85a5bce42e +size 318207 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png index 957021114ecd17e1ec1bdea84ceeb7f6ccd8449d..9bac8a8054afb37daa5bf890fab87a43ff45ca08 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:807055010df52a78fb968d91291dded55a34d52221c403fda702178677024030 -size 560611 +oid sha256:ac08f1b1a7c28d24d7bda7955e186a3ab39800ee978db3dd9b63c2f3818bb288 +size 743822 diff --git a/images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png b/images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png index b25a332bcd1cdb9fefd8752e39efc0c53ce2c9b6..02addbe1b7a2c5762ce3ce03715d390529162fe9 100644 --- a/images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png +++ b/images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51328a365a2ae00e8045e524e3dd84a0b71ad0ec08aa3e859853d596765ef511 -size 481809 +oid sha256:2265d58654c49ca8e55f3c0da48f8c1de90d73d66259c34d8ee0f0387dac7453 +size 751612 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png index 7d55c4b20738f674e4ae1ffad0234505ed4afef9..b129372d9c849393f53c10450cccc59cd2005725 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07cdc38ebddc4bba68067ebc4556ad30678312ce8e73ec2a18ce45da3f240a84 -size 870331 +oid sha256:d71eb907f7bc1f1dda3165d86cf963ac3a17f255b43c7b1758407a49947d0e8f +size 1117219 diff --git 
a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png index 9d0f67b8351eb4782afb362141b528f352ef77a7..355fe9366441fced67dd875538999fadbf4b3c16 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39f2ad4a455130f8ef3ef677b87580c3f7101226085ef69d1ad355585f37aa32 -size 914114 +oid sha256:9051a506b116b22cdc8d26dadb83c5de6ae553c6dca9aeda91d57de69d431baf +size 999012 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png index c35c565de3243afc8ab6e407116802b443d59162..e59370400db56f097f60859d7dea494c1e53a69a 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29d9a8d247ee7ffd8b7a4676044c394f85d25f0705d23c58152a620b5c9ef93a -size 976913 +oid sha256:bf550bca28a78b41479fc08b1cf8e90c3759cb185545c92009e5dea302582528 +size 1099513 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png index f4404e1305d45223311d46b9f0378288d78d2e50..2e268c0c711bf75add6cc23867eebba9db179b6d 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:023e88702b5ef76cd81187849c3b5038bff3a0ee31e37a16ee396a44702e8661 -size 951389 +oid sha256:73213f43b4b18f028f88806edc3872a8ef6e364ae74f2796fb722bf5079b68e4 +size 1059741 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png index a8f54b7feb10a44701bc119973ed32462069dd37..ca4c541e6beae9e65e503ee31a142255e324dc19 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3862c98ee5ca922bf898764bdfa83d2228b54d06a23382db7948a6d055978f2a -size 1031491 +oid sha256:0d5340a0d629aac804b9904441d1d4b7d564cdc059dfc72eba2984926dbaf87a +size 1239830 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png index 6c3442f1da30b8a561dcc1f91987dabfc8a09c7f..5fd12e4559dd9ed4c81b3db27da3404056c9314b 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa84ca9a8b477ff809a3438452200e9e959a98dd6f12b4fa5b0e8f9f7269c5f3 -size 631104 +oid sha256:a79315399e07bb46337e3fe6841c6998f8b6081b740e351863306b7a6b74534e +size 620172 diff --git 
a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png index c5ebc070c0676c36642bd36b1015d86bdf0aaebb..60350eabba272b6454a643cd3a75ae4761e6c72f 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45ccda514cc23c8d38bae52be097dad2a5ce811d6a7cacb5e62c7d8eb557a52d -size 1063293 +oid sha256:a6b2f36ecfa5b2df6621da679e3a8f433b1778bc7220844939266d33f740e923 +size 917270 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png index 74ae7168e8c162027dfc2746752a8978b98a03a7..b5a796a60c505728febc11b0414377b90be0c088 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:becfe9bf259c4f1bea3eb2e37eedcb12ddf1c86ba4432df0060540ea3eb215f5 -size 1026273 +oid sha256:be816c6e33588f2a008b7349cde6cf6d939d638b1239c63d4989cfea64417f44 +size 1146238 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png index be20478685368650bdd0d2fe2bf1311f22e91675..a1e8129b7b3b9bc55af0582cc38a42e6431775cc 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8b61f7f5ebe3897ce8e3a565c97472c4ee0be4fbbead3c67fc6e98f96360b21 -size 1016520 +oid sha256:d2889396f8048bede517233c5ff949a7d8dd6fdd3bc91682d560e4e13ce2cf22 +size 1269087 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png index 11410198ec44238dd1f1e8cbdd8536a53fe8d66f..a611d529bb180263a08e2b48a116b804df5c024f 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26b2e395c73b11abf08ec081d4bfefb622a443b7caf6e33fe73107dd772bbbae -size 916329 +oid sha256:ef26e022aba1b7add2176080087ab7c3ca51b14829ba7f56cb712f64f62ad88a +size 843792 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png index 57e3af4344e231f2bc062fca7c9de7f34bada211..7e2be6c1519a3566fdb25b1a51ef9eb0b69164c9 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0c4077ce9408a875bc0390fb2fdb69effe3536f5ac9b1af43b9e60a78934910 -size 1018003 +oid sha256:598d14765d9107cea1a6bc627009e10ca6f5d7376601f5ded785d114fbbeb3e1 +size 996236 diff --git 
a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png index b7b0ea1eeb882412d131f8cd147aff1c4f770e95..0d54b4c551d4a8d37fe30143fa0b96ee92850d50 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15c95a7f3a4e10b14ac0434d33f898e339be86d1699630b28ce46bb84a89f14b -size 1028064 +oid sha256:b5de1ed497d44d765cb4153eee9d373d894b6b15e618ba0fbbe9740b2b58d632 +size 1102733 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png index 287f70a06533272dd2fff7a33733515a5361d67f..05d232c7c2e9088fb8db07b92d23280cb91347ec 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a31ceb1436049c755cb3748730f8483f06762c3c50c7d8b990afd8484b2d9e3 -size 973378 +oid sha256:ea04572c20bccdca4f5b45358b11fbdba48df5382cce120797a0153388c742c7 +size 1158950 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png index c9cb77d8f39f8441b19d86cadb3da3e7a84713be..0e086646afd077fdfed8585e6f1f9a088f3de541 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22f87d588e2cb83c5adf27ebe5c45a53ba2f2e27d70eef189c8fc72669c39b06 -size 992656 +oid sha256:57ecfb8a83d8ea74400de72121361b3f933d3f6de2f61d584822c4ec186a3e06 +size 1050558 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png index 29e6e7a8d8925ff9b75d8e680f21726d8859a8bf..43f176af4e4a4ae8c2523f34349dcb382b4372d2 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d452a9c67666d992a84009196e3f73b0e3ce27f900964db7424e43843565e0b -size 1012917 +oid sha256:d4bbd5c391aa0766e45b85afeeea809934f97b845ea980413d1145ac3add8f33 +size 1303536 diff --git a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png index c43d35d55921d863196747eddbaafa5baa1c5319..b9180bbd4ab246565250e04bf8391b2eb3e1fa0d 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce0730c9a16294b810c59678e208bcdea0171e3a890a01cff118ae09a9928da2 -size 1023814 +oid sha256:cc399da4904c1a86c2670da26852ae80afe68b558a629b5d9f144e87548e42d9 +size 1276615 diff --git 
a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png index cffce245fb1390db2ad1dc39d9f6d68fafd43f18..e7effd7a8c1884e39b5956fd9b8954c1a7767ee2 100644 --- a/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png +++ b/images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08f1264e74d39f4806780081be682e2dd0e8f47a8c89aa7551fde78bc42fc764 -size 1063311 +oid sha256:dcb2cea83c5809e31504499ab060151c5f55170baec78d8f9ee09eca871d2b92 +size 1159230 diff --git a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png index b094958edd315a8e7efbce887b2e1b42b7da1f7a..8d1502b8c2d4de8fb89bdd99987ef715ee9b04c4 100644 --- a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png +++ b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d63f524c66faa8df23be769387aa22480ca1050f04128021d705acb5bba6a81 -size 2088273 +oid sha256:8ade58e898cae5258e16e54e518d0720de606acc224234cd5203e814c319178d +size 1571632 diff --git a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png index b345b230c4031ac0d369d4c1881ba5d9749c083c..d1ce827e83f7c9eedc612fff5ff7808d1c87ca07 100644 --- a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png +++ b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c6b82758c0d8e63b0958ea34cdc12fda2bed7c50b79ad69af3f901a1ef4a248 -size 1907079 +oid sha256:7ca52e371f6f66a837e977f5664d518154d62d3213782e57dae9f819e5042855 +size 1745806 diff --git a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png index e591fbd2677833ee3c3879ebe55474511e8e68cf..728313507acd4ca60b8dc5fb95434ac02a408174 100644 --- a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png +++ b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3512fd1be026af03a42508ac1856d681504f696340b9449eda23a9b912f31d52 -size 2083118 +oid sha256:3f286a283f3a8b708a88a065ffafbff438828f57b48cbf789fe7a133b24b054a +size 1906124 diff --git a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png index ff84912b92cdaa8185b3e794b2f283d30af508ee..05605ffae5f0e18e4b8e27e5917be42a146a6720 100644 --- a/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png +++ b/images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b0a0682a7b87c767731242095ba861de0b1df2cd2773b51b8b73aeb7a2ed15c -size 1993741 +oid sha256:f012ed65a0532d29ab02e68f2b4dca59a34c64f2e03eeb954ed07914072cd349 +size 2226972 diff --git 
a/images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png b/images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png index 5cef33a9f74a20c0db5e2b12af338413ba0ca228..96a3127da29e9a1f816a2e86096f5544441628a1 100644 --- a/images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png +++ b/images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0669afc63cec9a050f52a826f7399cc8a9dd575985bb8d9a51f2552e21a0e62 -size 1590579 +oid sha256:d5e18910961ccb802aba3f5c483c05babe362f17fd666cddac98909812894bd9 +size 1553536 diff --git a/images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png b/images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png index 9f66152cd5b593f149462ac6af7ffe36d1e46453..85ad2386b0782b137cb7bfe371bd4a9aae55a63c 100644 --- a/images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png +++ b/images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c88d113ddef804d2ff53675072c5b55788347f0691a11b8f8a8a5319d4b7b12 -size 2228756 +oid sha256:e6c0dd860c0a30defaa43a83e817efa640cfcada66786ba6f830a35eff00ed91 +size 2463135 diff --git a/images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png b/images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png index 153ddd96dcb818f80702d3f1ff036d717391a28a..cd72ef0d9670de1173c231f7d703deb65b12b994 100644 --- a/images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png +++ b/images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:721cffd1bfaeb450adf94958f96692f06ae717952e6649985c9b5003e9cde620 -size 977082 +oid sha256:b91c6fe2cbaf052ef8f560f885a79355204a486f469d02f673987153087019ac +size 876552 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png index 885a8341489ce18c2f3ea7e0f0bfe67abd160cb5..b49e52146b7112f8102fbb3ffb71fc0d15edb70c 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:399d60dc8ad9f48465b0b95aabed021ea4e49db1dc15f9c6ba0fccc7f20f886f -size 445687 +oid sha256:d83cbc412a27375f09d67905cab98080d22a9083cf7fa8d7261202b56787a6e4 +size 348881 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png index 40d77fc236fd27f10d45bed41ac60079898bbcc4..994b7b8ed683355844beb7c9032bf3e7a6c6955a 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2714282ab67eda68c7ad948f702a02fd388fd64757eba83435c09424fd37f2eb -size 1567068 +oid sha256:c64742630a44a2d3d981c0e3be0a152407145e5a13ba1dc5184863b9e0bd3025 +size 1406838 diff --git 
a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png index 8d63496cf0b70e87ee33d4046a333f763fbf7277..4a497a9f0894d362f4aedd3c63f2ff90ec15e77e 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35d1f3f49b449996926f76dbf029266b316262894c54a51d55e54a8e61738914 -size 360361 +oid sha256:43c4b53234b23159a3408ac888e7410802f9eef900a160e3c1cc9ab7b576c423 +size 379828 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png index 33ab37b9806d1a6c19a397926879f0cf332251ee..1566de4b72af069a850cb7193bde919dba3ae4bd 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe771e15cf8e08f326c4a1c5cd04b934a369c23c02fe232a95df066d75d357c2 -size 339083 +oid sha256:29f74467fff698cdcbbb91f41548f3cec070831365d2ec6f84430d392ec68623 +size 413779 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png index e33858e81468e3ce9652601d5e7e2974642536d4..c71cd67645c6ab4e036b3dfbc227f3f96b9064cc 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:070e5451080faec2232c20959f90e346cc4f5cf9117508a5c83c8f184fed2fe5 -size 385239 +oid sha256:6ffc924fed54a9c5e0082916cd938b4f28af525a55c71dcc0c7b3f146bce0c6b +size 371981 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png index 6fa4016d1a11a830be964be5568b052c7264dc5c..0d431bde39dfccb73782e7d5ac1c8489894b5c6a 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef8bb3d6572e6217bc852e40a41a1bcec5e63453adab9237458f2ec9fdec5bca -size 374678 +oid sha256:6a8ed70575b5b7f148a83a98abcdf8474062b7afdbf2e9c4486ec813ef8c3e30 +size 334319 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png index 67a7efda1f2b3d6441ffbe89ad8f1276f1b47641..0c7ad711969f20b91ec189be5142581b2b4517c9 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82946f83de55fc41eb7938dceb1f6f686365ab03593daa483b09c8e3ef81c5f7 -size 361000 +oid sha256:04b9db57a485f6c3d9c8f94f0801a5703e0fed71e470bcce24a9d1391fef6746 +size 276373 diff --git 
a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png index 4b80e23bdb9c75d3293d18dbc4f94aa09d60193b..f69d2c74ad6a9c80072d2a81ae34a6780ea9324b 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9314966ae1807102a9f5e88d737984a636c6fa0a1be52d961876e826f6ea3ea7 -size 451172 +oid sha256:8f14975a0d3b09453b8412936713cb8b3233cb06e962df4ed4da4237151fbf52 +size 355566 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png index 436505217617b9f1ee9e72dd0ef0ea09e5f1ed5d..cd3c3c6c8398b3e77299e7caf9d3878b5b077258 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce83b19228c0f37d2350ad6a809b5da6a98ed76f2b406e230a8ca19caa51ddbc -size 364830 +oid sha256:1df88a1ef93cec668ab6df642c3bf0daad26f47dc224067d73612e1c552befdf +size 526506 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png index 5d34c58b8318c28a6509ae3c83c5714c19af22a9..9658b4c740b8631a862eb8b8f4ae05164a21d8d4 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0084c9b5edd1238107c18948037ba03ed7a0bddad4592d22fd3c710b0bd23a2 -size 363410 +oid sha256:106d122d3aecadd2689ae07d1e021078f26f6143a9d9c1a7adf2a84b9adc39ff +size 327198 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png index 68f515651c4fd6e9bdf34321237c935d50d1add9..66cd35698f0ee380cea3e175f42725e85de9f455 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20a3462e3b9c2d4c25e39452837c400f9772325ddcf7cdb89304151b6dcac52b -size 524058 +oid sha256:b4cd12e060f1cb0e8a11054d1cc55a52d684e3b34dd43c843e1b72e1b8a116ba +size 392028 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png index 759e9e9271814d4b8e80c5c2f74331d6d579330e..c527da501d4c47e7b2f3fa98f71506288db099f6 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f5402c0508ea6e4e9e7a9afe61190343ae861b239642b202446b6b12367a37f -size 339217 +oid sha256:a525ce87b1a0ef17922eafeaec04bd318277951e02e5be273ab47bff12c676a4 +size 250167 diff --git 
a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png index ebf28f599ece76a31322e8d9430633d28d164f17..0627371b369aa6005209ce0b629c557be5bbb529 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a745e08792bcf45067d6b1314bb8fd2a34453598d0713903d6e851585dc6e551 -size 413042 +oid sha256:2bfdc79d7d3f81a39818fc1ccb910a2be0e67ed9528feb283e4a4db9d80059ce +size 425043 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png index 1fb45f543a292bc955c6c1d91cdf9f3648493435..5a690669b3d7dfd922cf915cc8d385a45a4f5504 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccbc1266ec1eacc65eea6ba9136a52e1243265b259e42ddf7e40d8266801f2c2 -size 364455 +oid sha256:0c69079731499aedb7dfdca96548cd8e5ac2ffa023e58a4221fcc99c0c57ceb3 +size 486860 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png index bc5b06e6098a01a9d6c2733348be9f29421fcaef..4a7b2eaadaff75b6994f9d0b0daf4eac228eed6a 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67629cb3dc01615a97bff967a16c5c8b3f13a6d63410f07e1fdf50625190c64b -size 467730 +oid sha256:ba6ab7cbb0703322b9ac12576a562bc66cee1abb7df7373219697fe6f3f0794c +size 346380 diff --git a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png index 7d40bef8f1e7e3dde0704ca80bdfe1367a4e4e1d..d744ff8951ff4b9270fa31b26e60144410eadc90 100644 --- a/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png +++ b/images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9923a7e923efd186972d6a225a7b34e009e37f5bf2a2eba9e3fb0a66f8c6c73a -size 376618 +oid sha256:60c71c8b0d1bb0cff8bf945a0afa5f3a772c52a89ea04190cba4be024bf6c561 +size 485401 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png index 5a59d681330fec7576b0a37bf408fe06eca595b0..336d768eb850925564b45b71a23d3ec32951c65c 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f1c68dfd9a968c942f910cb3df58cfe4692b47ba0e9cc590f654bcf13b5ac8a -size 501613 +oid sha256:0a7fe9bde97e4b68a61501ad001642b7d3282a1d2310ad03457695354bf4f695 +size 680161 diff --git 
a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png index 2dd3346f2184067d0862e4169ccfe880e74e6b25..82c04ec6a713666d10746aaeef8a39ea99239850 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de655e6941c10acb674ce387690c02f2d4c919857b75acd763d6fee330c9e10d -size 478398 +oid sha256:d00844781cf83aa746f4f9acef0a10de9c417faca7a1b3fd57c0167416a145d3 +size 605877 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png index 4fb95b95909667d30351bc97c209cfe2ba079998..d078bbac2bad24b54f05d111aa05fff6c59c43ed 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d5bd15847e897f62157d91d2f64426bb2c9f0863ebeb85478e5b02a865018b4 -size 545652 +oid sha256:4f8f4c3144c4392542aa10a46acfba9a329a28d1199e421b3915cd08b23261ff +size 799078 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png index 86842af8024c7344616c6fb5cc9fd37545125c41..c22060b5b51b658d9b84bae087b0ea772d40a130 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c1d283c209addf13faf6d84ddc2ccc574a0ee36675bd57d932e3903dff329a8 -size 615453 +oid sha256:f3d05392e387b38a70b033805eddd6e2a270e274d8c7a5440278b0d7b1ad8421 +size 510566 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png index c5fb0b68e77754945f69e3d490a33195b3af5170..d1375f6fa48821d2a8f55c6250a789f26aa59fe9 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08f1db84dab30809d323e8cd7ab50cba9da9e3b35edaf6954c15ae52493a4d07 -size 422255 +oid sha256:365856442898199cd527422b2b9a8ef96b380df46d79b369d0ddd3983f023ee1 +size 400918 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png index ea5b8aeb564dcd6aa8b90a0bb15a2caaaeee61fc..90023ffac920c66df1c7abe3a27c710ddcedced2 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c09ea12853a08f1a661e92dc96555c5f0878c69a900d4e31180c1b6a21a2f029 -size 1041270 +oid sha256:b9c99316bef81c22b4a79358ad78f55dc182e212891bc38f5a5595ad88088f71 +size 1534388 diff --git 
a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png index 959b51aef9505cf618bb1695ab88fbe629e82f0b..d49dfb326d3283c51f53d94550892e0078fa0327 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16491e9e93c383fbc0bca4782282c4466db29b17efd382264a07845fb4951e4a -size 800383 +oid sha256:15a5422d66bbe3f6d7532581978a94c1f15d2f9ec7e2030fee7b50083486d062 +size 419572 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png index 1293949e6127befcdc6e97b68b898486c8bb16b7..26913f7dbc51eb787544e2d33454e3808d9726cc 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ccdf2f1d30d036221053ad83ccae5c9d0282975cf77dd25fa222e875e9342b7 -size 533806 +oid sha256:dd83fdfb3c46ccbed2a38bff2af1e5bafaf1e925f11a0740bc57afa617af0c7a +size 871215 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png index 0deb9bcf426482d026a6a44d737745e853efb0a8..8ef6988c4290ca04ead77f51a766dd649d2c7822 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c85acd434da9e1a5927abba2afe3659cb5c0a01917a369dde912ff962bed6a7 -size 2977048 +oid sha256:56089d2adfd10a3d14930a785396020ca270f27dda7d0a6fbdfe7a4a7c59d22b +size 2002510 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png index 2eed2d8bbaee3674fe5735df2a3718dc38b02025..620a4829334a8bc2a7e61789cf94d7779c96ea7f 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38d84f5c6e41db245bad42eaa4134d3dc1fbabc6203e038fea01e28adb3797ea -size 808336 +oid sha256:1a03e34b12ddfb34baf2c3f3d3e27adba64e3107db632e3c3b1eb20c8ac7d00a +size 697386 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png index db755a073a910725c809360372e51e22503476ca..9bc46224e7489caa00a28ade03d5ce7d0b00cff6 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57c3fdff1ff516da2775c0f728a41c3174c5973e9d4af99d18746d04ea922aff -size 402667 +oid sha256:1c96d8ee7d89c3559c4734510c34d501526c2e4f88b369825fc31997daea9442 +size 549490 diff --git 
a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png index 213dd9c549bab6d40bf7234fcc8f7e9e1d555e0f..66a7b5dd0e02b6d3f16bf2876abb0c3be1a8637a 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d102b663f047f7d0616b326b8a7e7d9820fead42e46f42c21577927a085f4a6e -size 403188 +oid sha256:becfb5d0d80a5871b983ed037f970b33930bfdc7c5f313b63e981892089272fd +size 401000 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png index 1a9ee48e2a58f898fe4eb69e8779921f87299273..00ae84d26ac0dc162a934c1d44631cbef75f90a5 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3d7c7dd98429f0773f9d4e84f78e3b25a5f8efe85d0a8cb2c8cf06da1aba360 -size 762975 +oid sha256:01548d534d6c3c1b3c51911f16150fba9e2422628f7686aaebecbe7dd0b1b67c +size 857026 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png index 117b40e3dd2531cff0f3d7f82d22c061ef6c0820..e7aaaef227d96a63aa2a214aa4e71b24c6330d20 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aaeedc36ef2ad64a0bf57470bea1f1b2feaa9ff9912b49ebf4a3a18c7bb8a438 -size 514688 +oid sha256:dfb3b2e0d26ffa6e9263a7ec1036c23abe22eff16498b8efa87fb33200ca7b5d +size 670597 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png index e3835207520915a735ea54f4212660444468bbb6..e64030b4dc0dc2ff550500d1e14a64247b4a17e1 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:349011e69535ef72b502c13e8a1320ac0c1e1fe2fee229873b98aa65b4773a1a -size 523605 +oid sha256:5df265b38cb93303342d31ebc8be61c5731af5862469f35ff6a3d253f579a450 +size 534634 diff --git a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png index 9f62d0489e5ccc8db651fc9234219ae3a544718f..14fdc5e2c7228ab27249638a2cd259ac5a9c824c 100644 --- a/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png +++ b/images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c1c905a5911b33aa8d3202eaed177210d9e811a65aa2f5a6071709c4af85681 -size 799972 +oid sha256:c044d5cb00714c374d3d418dbe058742cf468ec404c08c2d693c5c1b0c771a54 +size 691905 diff --git 
a/images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png index a7248f44fecb85c0cbc0564b8076625ac02fd0bd..eda2a328402ff02597e3d69da0e80a2774957ea2 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2305835c029a3b3128ed257eaa8d20cd60e5de9fe04dab722b0688f007d105f -size 1101316 +oid sha256:f809c102fc782464588f414bdf1d4003e8fde14f31b65093bc81e51c9c76824b +size 1614093 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png index 289c5e4f45318c54912fbd9873b24bd4ea49c00c..cc7d15fc0d91aa52be3399ae276cef14805756d5 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f63243883ae8f3efa2d729cdf2f8a44cca0f402ac19d3148a918dfdf3c8d82c7 -size 1101575 +oid sha256:7960a2c069fc8a5149bfe87a320edaa060e2528fb6be9cefaaf77708afc39237 +size 1294368 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png index fc931354d063b93a3b667ae8667e821011713f9c..806570df9bb144021ad77b0a31e909eec3837e40 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3df5d2ba536bf11300aea92b909f3e3939da9db842b5837163cb161334ce14e7 -size 1102564 +oid sha256:7de7c404f0fc02fc28a267a05b3baaaac3e83f374a9264406fe2753ed8045af6 +size 1524927 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png index 36cc3772fec70357bd5139a78ed4f6907f4582d2..c54371f8d09883e122fd5211f66c5a57f5f9f706 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d54e439c8e87b7cb979a6f61417bc0c516e367bcce672e4627b15a1db0919269 -size 1046573 +oid sha256:4d146f28c47b2db548687ec88968521134f80bacf7e394e11eb908b075527255 +size 1467637 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png index 1dae6b7deea0b5efaad140be8cf6b8aa7cee0525..22b1c6a3bcc61c787b1b13f0c8577ce5b47ea9d0 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33dcf9597aaa8757eb4b2dc3ad69a68232d57fe1a7386a3bf6b5bd46502998f4 -size 914767 +oid sha256:a9eae0eff01374fbf44d7fd1d8ccf81e84695d178d730dba5583bb58469bfb12 +size 1433849 diff --git 
a/images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png index bd56e769095e6af477b7398b44d14308453f3e19..67d165a6b538abab811523fd6a839eaf72fd3594 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68b9935d742a9ccede594bca28106508c264e7e3564ad9b3446afe044d30e36e -size 1046118 +oid sha256:457bb2fb5cb003dd76b5ea08291173da23d638760de9057f97b695fa7a3ee9ab +size 794187 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png index 8161e5808ccf970ab9c2a4f30a4ccec4cbc6a80d..ddd64cd3c05ebc695fef66985e5160c5c2cb1990 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e40034d38104589f52fa5b8def76222ca123ef8c0bab886263d75cb177724a39 -size 1045780 +oid sha256:954677476167004d39ec5149f8c4dfc1f5158a22323187c71a3c8e10b8783eb1 +size 1604597 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png index 34b02751ab4d21693ffed74d1d066e47d804dbcd..37822a52f780cb00b8eaf5eb32190cb091c0e896 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6973dd1f648a01823f651f16d843f81577f23d0015246f4e1cb3b3fd285720d -size 1094984 +oid sha256:e6fb391c3fdb28034a6448cff1617baad2d2ca9baaf34ef9640705e1f0a065e2 +size 1500391 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png index 02367e08b70c14965450bf45fd592e3c2dcb90b7..ec22dc72100dd09574e8cb5b0a3c2feb3733c426 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7893f2987d51013c623086a376b5f2dee6e68eed1e8b5b0e79a28e6acdea788 -size 294879 +oid sha256:1a2f8d608ddad5c276da99e2daa24b1f0ae0d476b5cc1a8dd9596ec5c3d61975 +size 521065 diff --git a/images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png b/images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png index f658410c8118152e7c4aff077a9f14524dc558d9..9debbddcfc9602a372b6864a022666dd8dc9cee4 100644 --- a/images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png +++ b/images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd52a034011434a509f4fe0fbb5d533b71c2c41e647fc4e20a32d9564c242af5 -size 1136010 +oid sha256:a5db4295672aae235cea482688720023d004bd4c67dfc6417d531f3a573ffc96 +size 1646831 diff --git 
a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png index d89c10e7df6bca1f33156ff86508126e42979016..6d3a80e6d78f4a2cc37546eef94a90986d968fdd 100644 --- a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png +++ b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee28dacf7e1ad8921b24444a0ad94c4dd105f63788cd50dd256b1d6cd2d417e6 -size 592649 +oid sha256:e179e9dbe284f5d4590267c9d59f42f03c7d91bf93c53a0e462aea7ada91d280 +size 613001 diff --git a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png index a686e6b2c35c5c6abc8ebaaae04a0318c8b3ca8b..452631fb1768ec6bd8d72df618003a5435d2e1ba 100644 --- a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png +++ b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:346285f3b8828df1c8beef7a1f55eb40534a79c028071be8706f02d00c168a25 -size 196729 +oid sha256:84b9e6e1d11b9d4819ad4339f739b4632b8be7ffbe92e52fc2f562e89e38825f +size 245269 diff --git a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png index aea8bf04f452a6deb1da6948ee011db6a6d7d7bd..17de21692fb74b037d740096e4d8792ba12f95a9 100644 --- a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png +++ b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff2a85ab398386d0bcded8476dd8472520187dc25e03fc0862d1c4543aa2b8fa -size 1274221 +oid sha256:ab6606bacb789c5c57799e2520dcc03e2610890498660ee7dcd5457185e56f4b +size 1076159 diff --git a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png index 1a01fae87c45beec1aa877c0055fa5367fbd1c38..4ba7a0f34accb2043f744d3e797f9969df621097 100644 --- a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png +++ b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bbda2ab2707783121de73410c0bc645274593e991eb9cc0e7ec11d3727ee399 -size 866356 +oid sha256:362fbaf966a23d40e1874addeb5f476ba8d10d61d79f9a28babe548e5f2fc364 +size 729323 diff --git a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png index 5f6339a0bb7a6d67306d2dcaf64efeba22316b08..bb9c264bab7974b89b9211fda5f5c21b4628f6c3 100644 --- a/images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png +++ b/images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51ff8d76ebc041821951ec9aa5c9ebb0c1fd7121ba07f52ad6e92e6228d50224 -size 1279742 +oid sha256:5d6bf838749a52ef96352d4163864d9005fb2d4cff44c88784913e97e54d4a67 +size 1266858 diff --git 
a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png index e048b6526519985c47d2b212bd49d3c73d801ae6..e06b72c426c5418aa324f4edeef658f346a67d14 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:094e59fa6d933298a4e02ed5827c4b1b33b9fa9b175541a6d3fe916156be5796 -size 841440 +oid sha256:d99740e0a3448d64b3fdcc67543f1be4861e0e83cd72736c8f9511d233847cff +size 555333 diff --git a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png index 21a55de260fa35f9770a578f8ed9d40c53413472..6b7ac1d8daa86f821360a388e8416c621c7e073f 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ed37272338c6f655103cf97cffd1b19855c5eb4f227a453a802c6e033f8e535 -size 1192926 +oid sha256:ff8be4377e33f088d89cfb94b206f0c369b2f47ca1cf5c5aee655788a8797d27 +size 1171946 diff --git a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png index 237a014609ace9fd43a696a27a2391e7b49cb77e..92d17ea7af6fb59227efed9ce268f5efd837ac54 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c06c27adc11ad77a2ca761feb8405037fed30b0005cc1b3c98de6e8a7162cb69 -size 1958317 +oid sha256:eb4ce93b681a69f999fd0fa71318080f544a2cac6e54ce9ee1cd876d1504a482 +size 1593087 diff --git a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png index d25c01b65b0c574f793cecb334ad603299eccb94..f393665459dd32811f98d4df8f03680bd6d7923c 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de0f824047a69ee4c570ab9a135bd09615ba25d104d2ad086a15b49d6f2837cb -size 778550 +oid sha256:179066fe7fa3eb6af51927e9d7aad75855cfcab8e271a3567f0188f87c1270f3 +size 1217595 diff --git a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png index 038e0e6255a1a5076bf13faf571ce411e2addf03..f1cf93be991b56f1b6277a8e12db69dcea648550 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd07002ae1e959d10d02533901a7cbb68c3cc4dd7bee872a0947fa3bdcb0b986 -size 825823 +oid sha256:c2d6a74e898e41370833dea0c3add7b4011bf98611ba1bf8566cddae6e8951e4 +size 717609 diff --git 
a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png index 784317611874729cc233d6c1460aa697130e9303..c8b9d6698f288fe26c7ba148e7d198e8cc2eb4b7 100644 --- a/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png +++ b/images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60cd4410375f423c7ef722da9b9827d2357c612d052379c2289142237049f38e -size 817511 +oid sha256:377666835289c43360f92364747e405640833e0e5ccac34d5443c3c03aee0237 +size 1186902 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png index ae7be6f0a5153721e5931eece8e85d97978f77fa..972a14abe0cf76bc829d853968202168468ff416 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8346cbb8b2ffd98229c302c801593654686dd2e2e13c9c2580e70884c3111738 -size 251769 +oid sha256:d5190253485ce38807c0e9a36dd6ef642656c7515fe1a3466f0ce207422a8d5c +size 246871 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png index df8b97f11b58ca16ef8574350491c8c9d83b0d7a..8126467b79b5102a664992ca7b01920f4624a0d6 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a9e55916e6bce086826855429a68d2346bf608bb24febac8af5017856fee03d -size 248016 +oid sha256:cd6dc92e0ca4ee10c8e762fb458d3b1db20887d5ab37ee42efbcbfe7fc4f6f39 +size 255012 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png index f52b8b458ff69750d71b073c0fe1511575ec6a29..59a0b591a70ba39f3159886fe0dcec836fd61da7 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d0c9b5e09447ae154a701c99996ae52cc1e679c18fb5bd089394fc7db7fd4c8 -size 357204 +oid sha256:5c3ede1535332c9422c3c1df24db1777f3be6641bd5e3ccd48aba1ace04c76b8 +size 352669 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png index df8b97f11b58ca16ef8574350491c8c9d83b0d7a..da664b06611b800fe1541702087701b3acbd4f55 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a9e55916e6bce086826855429a68d2346bf608bb24febac8af5017856fee03d -size 248016 +oid sha256:03f78853806b1a226b3b9a0ce5c0d5fd486bb672ed0b0a1da692a524da091633 +size 254051 diff --git 
a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png index cb031086582d5aa3e0ab7702177b27f0cc2d97c5..ec0bea36ee4adb536777b1f2dd0f475b59b36b0e 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9081909ad07ed7e3b63c9d8403f517639aeed589f59382e40d4f322b8b87fc9b -size 251326 +oid sha256:00f93876b4000bf42a16c25920a4f90f5be3088ade0b5282642483ebc8594a5c +size 248171 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png index f52b8b458ff69750d71b073c0fe1511575ec6a29..14fcafdadc11708358c60f45ff15a615f6583a93 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d0c9b5e09447ae154a701c99996ae52cc1e679c18fb5bd089394fc7db7fd4c8 -size 357204 +oid sha256:2c5bf1fba99df0cebb88e3e473716fcd5a5a5499de81978ee256335aa08353ce +size 350979 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png index a12b0c70480c09fbdf2aeecc8d6cdbb57bd0809b..3b6503b0b9ae1ebb23530e48c0c7ea4896ff6bc5 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1acff4ce0c1d85beb8ec0bbda77d2a1e346f6295e8ee6d2566c3231808c966f4 -size 1096606 +oid sha256:5fd8b68c2ac021d688fdd8173f8fa1f718c05d0132f7ecb531aa811249c3daef +size 1231657 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png index cd85c8c872b1ea88f762d3af31d0dbce4a91f3bb..655832a4c9845ca19db747de82162580d35846c0 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:393e0fd9df41ab1b65177322ed9569392bae68d8aa21ab55658289e496b83f50 -size 857818 +oid sha256:1107b6df4fe83b374f82971adc61cac04b8ca306947158ffd89f89598f03a933 +size 793492 diff --git a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png index e1a85459bc5df1355d953a180d53884c984d0353..37b7dabcd5af32681fc4b14e995dd6a89cd824dd 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95d9be3fba1c201b268ec58e8c92aec7d677990e4bad48dcb851718f106b1d42 -size 249164 +oid sha256:9faaa118f603252f48b75e162a5e8a0d75aac0adfcf016d6758308800b3edc15 +size 262967 diff --git 
a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png index 6e33cff3cb419019f75078171aac943ae8ec8608..e22fc575e7d94398ee4b9b9a4a6c0441d3725549 100644 --- a/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png +++ b/images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8758666e2a5998347c0c2c51f5cc25670f8d366d1c0d449a033bebe6e7126b5 -size 359038 +oid sha256:8262a497e56ac6bbff524aede70e498771f8cf923d3a362786543875493669af +size 352494 diff --git a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png index 2bc76877a4cf3a7fe5a177b4d26baeb536a1116f..2d7acfe5187f5769b7bfe2f4febef5bfa2b494bc 100644 --- a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png +++ b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:639e73b35174e24524cc8784cccd4f54a5c23d233b5b47d128fd410669623a44 -size 1066075 +oid sha256:1df78714706e147c3a1de1e3b49bdcec763749fcbd9afff48d6864800ae71239 +size 1070531 diff --git a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png index 68bd187d8439f0ab48fe2e0eae71532e1416faf0..c43776378df24c4a68fa5e7095a101e950817ad3 100644 --- a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png +++ b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b909afe8bbe1713cc884af82fe086b0e3fb087db6cdc92fbe2cc3024680668b8 -size 1074742 +oid sha256:5da9cb6b53743808680359ab8e29f736f872eae010d6905fe5f83545d46111e1 +size 1073562 diff --git a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png index 92dcabaf65da4308de698c651f90b7716a4bd891..c8bfb607b6ab0dd943b500008391f8f55db726fd 100644 --- a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png +++ b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7150a2099b3ac071032c2cd48ce2b41ebd29e011a9d335bca782e07d595e572 -size 653048 +oid sha256:0034f9431b5e311a5585c620e00d8c7a0a1c8dd0e8836b8baa0ff84f9b07aea7 +size 616395 diff --git a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png index d64c7e20ceb8352103c227ae3d8ff764a794d899..e9f0b0f30d75fb193b9c62bf8f64d2dcd09c44da 100644 --- a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png +++ b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d63c438c4d5e9bdb6a078bc9ff02532fd8b8c2d24671a793601227cd5b406773 -size 1610482 +oid sha256:d3fa1dd6ca0678eb3370a62ee6cbcd54aad0904895d2b479229011ef3b187da1 +size 1499888 diff --git 
a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png index 7ecded0448402018bb58b238b6b7c94ceca01aa2..450ce8cd1e70742dbe528b4df71e16a591ced490 100644 --- a/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png +++ b/images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff197597a1a3d0e01ccb4b1b9aea5113623a1eaa04378443974f1b687fd8f616 -size 1118127 +oid sha256:9d838a90918e8f8586d1054c1b26a7c4a05c350022c360de2bc24d6d4290c94b +size 1086266 diff --git a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png index 647fe54e4ff52e268638dcf7129cd1087efaae5a..0493b861d7283a9fc5ff386cd057df1e49dce410 100644 --- a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png +++ b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26d20898a6f7063105c4c1df3e26e1f78b510a2777e225967b74a59254c2d2a7 -size 610013 +oid sha256:82bb33d03f82647505790eb58f4af5d479daa2ee8581a9f2d91a068e3f1b03b5 +size 891056 diff --git a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png index 73f12b9b8b36dc25428227cea734fc3e7a1c7aa6..d26a306bda1d8b950723a78153eb11344c940050 100644 --- a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png +++ b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb37630dedb13c0e7c30f27ff52bc77f382f4b9027701518810c220d94ca8cea -size 374849 +oid sha256:51b8f6ff764aee3398da28ffe6b402556536f7f4e2efb3f617a33466a77b9b6e +size 1112226 diff --git a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png index cebd775e75845d830d16a1afa47f7bccf147b896..03e435a0965446766356ba4a3f23843f42fdde86 100644 --- a/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png +++ b/images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0275965efaca29948198551245edc48100e52b4a45f932dfb574c17d675444a1 -size 620415 +oid sha256:92703f5501c5cb65aea766d65be29095ee9786a2b5a1b03a0d2d2d36af9ce8c0 +size 1832787 diff --git a/images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png b/images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png index 5a842b8b2cdfd0c3f452f89dfd1a50254e61aeb7..78423157d1260159cea8e5af50793ec31055cfe2 100644 --- a/images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png +++ b/images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38af4ef9066ea0eca58a74b278000ab1d432624d61fe73129b40cff83a77e566 -size 1047995 +oid sha256:4487bc7f95b50ec28544c033ae1cca627a208c05dcb692725dad19ec237f4214 +size 1116050 diff --git 
a/images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png b/images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png index 3be498eb5457b2ddf6e7e30b32ef8d2f836c558f..decda315f1504fad0baa3dbf3a05db8b310d4473 100644 --- a/images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png +++ b/images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7440a985fb60079bc3f83bc07116e282383e669424e13b5607c0fd4d1570f43 -size 1001778 +oid sha256:d62c05622556bee884e86c470835087dd51026eaa324986f7dc0281fada40669 +size 1207669 diff --git a/images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png b/images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png index b4f994ece133627d020855b8aad5fc7a48d3dcc7..d5c68b82b4d3d103b078f3415bae8438283f91c8 100644 --- a/images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png +++ b/images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af305549227b1115742d5a3b2ae17ad38fbdad8a1b30c26dc37e941785f05370 -size 2343421 +oid sha256:0d294bf5c59eebe370f308f501acaa628afc82d82f7b812c8b9aade1919b5f2b +size 2025454 diff --git a/images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png b/images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png index c2265c4c5e7246d3938ce770efbc1303ab65445a..be183d940361d300e7292a88e0e0e15510013eab 100644 --- a/images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png +++ b/images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77c505ccfff2dd3b88eee11b72685e4cc4a3736ad7c7d5e0d403f3f07ad2e3b4 -size 1720722 +oid sha256:64043e192f0667c4ba2f6f9eaca335ec74fa81b7ed18efce516765246f18feff +size 1123242 diff --git a/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png b/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png index d25c4e59002c2ff99ce5a78060b6f6f05984e0ac..ab367e26c179290a06828c31150b1fee7a10466c 100644 --- a/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png +++ b/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6af0358428b06a0529d11873bd8ba0959ef73fb03684edc48df0185201c55260 -size 1137103 +oid sha256:ab84c928e19cb3a2f085f7e8410fef6af40f6475ad0677053dd1f952b777b2ca +size 777719 diff --git a/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png b/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png index 089c247bfba6a28e38800a3b80f6295b6e9bf7d0..e9c7014d918d6a3d03ee573e94947f69a7cd68d5 100644 --- a/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png +++ b/images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ec9d1e0b65c87d306f2a57c883d2903cd5294268fb5a5af710731e2c3058531 -size 968327 +oid sha256:494f5352ac00d84c377d7b03c55ed7f8c3531b1ee68753f746742fba5dc9fc60 +size 1166843 diff --git 
a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png index 301e6823d25c69a78dabc67c29394f5e319ea374..b5bdd0f7b23ccd15dba0218b40c3a3bd9042da27 100644 --- a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png +++ b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b7e4aa329c3db07631b1fc0827d788f6d9c73e9e1b2d80dc180fb3be1929ceb -size 1151336 +oid sha256:6b5fb6921c5810dad5c0f1e6c807cc64bad7f9bbe57667ce697fd5d48134b374 +size 1455554 diff --git a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png index bfee7ce6af68ac96d45d51c51df34f6a79493e1f..3f6ee1b9b553196e0ec70345fa1b939cdd577949 100644 --- a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png +++ b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e44838e41eb6df3c997992629994ece636ffd6986315ebc34234773246100a9b -size 1111189 +oid sha256:e498fd9016a8f3d2f9ad1c2a13ea494d636187607ff712211475b3f98e53d9f8 +size 1856857 diff --git a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png index f8a0fc3b756b52aa065978c60214f814f22d3604..e78acc53f640988f78a4e76f73682c9572a49212 100644 --- a/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png +++ b/images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3351c8b70862273e7529a3501d53280c87ca2507d248c4f3ebc5b052b524421f -size 1232468 +oid sha256:27d9e8da74e3becf2fd4918fac1c5cf8e2b1221e15c9ca5505a1741e92151c9a +size 1991758 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png index ad6aa21f59b69212eaa3b6a30adfabc7509340a4..83f5bf5d8d1c1202f8fd33348044d8a2bfb0a01c 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:576a4a6f6f5c8e69b63951ee6940f8a750e88b89fd5ee14d5f30123b175461a4 -size 1609975 +oid sha256:bd6b5e02c145805c3748a88389dfc4cf6aee8eb2ffd13b2dcd46c4e9a682ea7b +size 1602678 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png index 479448846970ee546e0b6bf80918607141004355..20818add2367db99a71843fd2d68e05f6ec42307 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60aca3590db758fbb6284fc06da41314f94d1488494ea01823a51e26684436bc -size 470763 +oid sha256:c8e9b5e4e92c4054390709368b6edbbbdc96feaf64305a744a4f1c2b3b6409fd +size 287993 diff --git 
a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png index 75c49caa675608166059c9cee84063f71e3172c4..ba58035ed275304daf6578e91a9b8daa4de0bc40 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2153f58814222f1def816ac98ff5e1a8cce0f4a65066b82e70c3c8fc83e0985d -size 1260250 +oid sha256:fb8fbbc5870d8f1114b40f21b22b9a684e2d97a42abe3ee973124f358ecf514f +size 1258497 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png index 02989732cff11bc6233560764f4e1a24ac8ce281..36b751ae78f08f6459e98cb2ca5a738dec7b90f0 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:511ccf0f6ed636f843574676766af4018d3c1e7f5b4ac3be51686f50b11db57a -size 1632121 +oid sha256:60e1975eacf06fdd1bf86bd10089e644365f47135fc3aabf7cbc01b6356e5fba +size 1149657 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png index 1345ddb67426829283050f2c9b8945e6f76e4695..382f12a34711e79e5cce66c94356ec3359121bb5 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4485efc72f599ae5e2f7c2366dd6ccb7c1ce4e37095432885a6828f867bd7c94 -size 1604556 +oid sha256:88b96296c4f761c6b49ec31b8640a12789714f441456f2cbef4323c9ea65a2e0 +size 1254618 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png index bff4a7ac5d87e408816e92fdd99392340e66db8d..a787c9a50c55dc4ff39eb5e632733fc627baa593 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2391ee823c3eeacb12d2440f37982089594e348dc40709ea09c8ce6e441a375b -size 1602952 +oid sha256:d0657d29c8f2adb185033edf09783f94e40914b93eeb92e18a1bf380aa3c47f9 +size 1088117 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png index 2c58abbf5f788138901f9552221f65e307f66e65..53decb6998e086de83257d1b0dfa53a90829dd60 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0517b5d226b7d47ec411d3e089c6db434fff4b3b897cf3d8dea2ee4991b7ff0 -size 1257625 +oid sha256:78a5d2be2c62f416896d354add033775fd8943806d84e7d37d2371c77994b0cd +size 1214842 diff --git 
a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png index 1db50d2b58e7f2d56c5bd3505b05c69beb5bc9f9..919b790c4400111808b7b7efcd590262424739a7 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c4d44cbbeaf713a172723763b2009332efec74199edbaa3f514a4f17effc30c -size 1652844 +oid sha256:0f9c06d47d7c36d11ba4653d78ec39201a89d87666565616cce1fab757ef9138 +size 1312647 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png index 6336b1de894fef05d755ea831de1d83d39649a66..69dc5297cef25e68cc9be04a06ddac7d0ae8d633 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c6b8360e3baa9a27adcbc6db85c101cb7c6712f5a7740a7191d6e527468729f -size 1600948 +oid sha256:996f94816e3ce9fc887b6d1d0ac7744f6f395fbb92a6be27f597e4a73eed8088 +size 1208855 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png index 23ebc8ceb67d1310703472a9584e26c967737b41..c0c20986939bb90108df0a01b0a3a8958d98cb03 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:584929835a46ef9d2d3c7dd7b90deba42c968e96623a56d8bef7971e1f44c23a -size 1600813 +oid sha256:fadd613c8d5455d339be9e49e08cb72561860da1e6f9f9c814fa006c31c8bff3 +size 1213633 diff --git a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png index 381f4ab1b19213a968302cff9f8361eb55874868..cac05a69697e48a844576b114085593c91559ec7 100644 --- a/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png +++ b/images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:527290af2e22012cad070691de241bf3607e49f576d2b42cf295d2ab81d17cdf -size 470577 +oid sha256:438d8c67779c27df49187bad8a37de5962eed888577163e6d401ea6626dde677 +size 952819 diff --git a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png index a72fabf261e8c6f6b1a72d6f89d9fba3fd8f25db..788c17b2931fc55ceed1bad24e7d8cde37556705 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28f970c31dab9189c3bfa5a49a461a36e1a77a3c18b26a4c7765aa935d4eb112 -size 962675 +oid sha256:8492788bb410aef70fd2fdcb3e20d95e5527f5d2ae116b88eaa6ff7d42a8505d +size 949005 diff --git 
a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png index 9b56729dc4a207eaf1dc85096bf3b4dfe9fd21ff..88b4f7e74ca7ab138d1bddd72917c84eff3cd115 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ce7a2f7053fae1c6bc052005d558d5199b4ef35e26877b928dfb478cf4b7dbd -size 1417040 +oid sha256:21f391608dbed7c6bc4b75f068c0e2193ae56a5faa435c2723386d9ac2980c62 +size 1093286 diff --git a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png index 533e5e281483fd5e43bd9e16a11178721f07d349..3616b4cd6c33a0db777fcb921ef379240e2044f7 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da47be6318eca05860b5b34bc68f4441a00c55114ef54c5bffcbe7a4d2c73f97 -size 1371240 +oid sha256:2fc6a7ff704cf7013235f02cb788d50c1a1466cfecdabe341cc17287446ab81e +size 1244465 diff --git a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png index 4b749c327680425a928ba040f8f0385593c6079a..426e72883eebe4b922dbad8c0a09e431cc2a1d0f 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb35002897174b6ca3b4d9138476e299b6de59a48262e8e8f4ff8eb8862a2ffc -size 1639242 +oid sha256:f85e1dea7824e3a27f5df65737e59e287734faa716ec78fc9d21aca20a3e8b33 +size 1428486 diff --git a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png index 21f60cabab149027b1640e141128d74a0247fb2a..37f9d0ce6b44daf385ec9426a2bbf64319ee760d 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c64c010a99f7a0cb83b9b48c089d709fa0cb92b0cdcf51b27ae5e62035793591 -size 1008119 +oid sha256:33f6fa168fbf6e15f2f9a4b08ef220796d08621c09cec77c4a3a1eb20b9ffe9a +size 653009 diff --git a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png index 7b80c4d9bb105cb93fd6fcdc01928a4dc3c82399..41cd445e34e4daafba32fa196fcdd2eb86033f2c 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f3e7e89ca3cfb815772e4b7c161f9129c92b68abce873066063dcba6164de9e -size 1339193 +oid sha256:1cf7f1f37b7ff88791447598e142e7d4c2775c0ee11a9ebe8e26dd205f19a154 +size 1516981 diff --git 
a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png index 860dcfbda8b73e4c1410ff92b5979fef8d0bc1ba..028b971ebfab5d9eec739453f37effe2be804d2b 100644 --- a/images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png +++ b/images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37d8089317d4686504f90bb527ffeee3d287e05cd86b81ba03eebee844a65a43 -size 1126178 +oid sha256:c9a902c48fbcfb1562f21487ea00c60f72d4424428bd09d827edc13d3d5f7fbb +size 1671293 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png index 4e6031c8549a903cd6c92a9f6c7183ac8affb983..4e71e34e696393109896e559f5695894fe06daf4 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ec974868d69a2a55459d258e3b9921cc8f37074c58e022f5dd91547478cc502 -size 1404889 +oid sha256:62c032ccf28d5238c49df2d31678829eac85be13b4e094f2dd39bfe0e6bc6771 +size 1115323 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png index da84c16a648b5baa1a4c884f0c463ca802386824..6780021afaf0f1999074967b56a0c6af820df248 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:383ca5720809e4d368b6d7fd6666194734ad5fa3ea291e4b312f877664f4f8b8 -size 2064611 +oid sha256:4c916d262357f635fef6422fbd83865ae25f4bd634a238536a3c423ddd27e175 +size 2154079 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png index 12cb4ac7cdecb881dfc14c0cc7701795b9e8919d..6c329aa5925c81bf8baba2f712dd33b7a7f13507 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91aba088b7d43f2afd50d82a6f678d58dbbe83082709e62c6da8dacd8753aaad -size 530828 +oid sha256:23cc485acf75b1b717ab7d155be9ef4da69628979ba2bf0c8ea93febe6fc4790 +size 532930 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png index 4a89d537a2883f783176b96ce7fc83cc8d8d013a..2d0557739139fdce2f5e83af839d01856315ca82 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aff33ce3bd787a02a76eafef39f0de02ecac98135237e04bebb47a9948a4a76b -size 2409033 +oid sha256:cf377e055bdaa935cd95d93f564bbfbd44fabeaa313834020ddf2748b48615c0 +size 1152458 diff --git 
a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png index 5a0eb910f2d734f417afd7f78da036f5476b184a..a9f09ec8d5df145b9f5715af6171a2bd0c49f226 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af2d85f30672f93179193bb7cd327fb4fc6ac16b2fc4da0cbbc42390cdb56915 -size 3265097 +oid sha256:cb5d3402582a259542afb7b27f0dcbe09b8457a75fe2027f0532a58575ea053a +size 896752 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png index a47ae953fc8d76ed4a340bdfd673e95ad02bcdb9..f60771d7677f4e8b426a021bd3d06aea9e37899a 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1a42a7cd768e3242c4f41947b7210e70e5f0f39d4af844222ff6743f364d492 -size 3156650 +oid sha256:d2774fb79f24ab0a83dbfebb8b62740d6a350e47889f6a32be06b9119d9bbe2d +size 1069191 diff --git a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png index 79f22973ab96c58884ebdd09c77beafaddf15a15..d247071c32c3e34a24606462d744e836c27d6408 100644 --- a/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png +++ b/images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6b481365d578b9bcd08c45feaa3cabaaeb24c471d7467c085940d1b8b5cf078 -size 519261 +oid sha256:04490ff9a64c7476a00bbcaaebe360b3a867ac2725eb3682b1b2b4690bb4fa44 +size 583770 diff --git a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png index 9712c47f76215686b31f7bdd67dad1c499450fdf..bbabd46cd2b972cfc464cda1374fd9fdf3b9c2f4 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5f9ed078f11c1ad89d6e4874410ef10eba515d5f726d29c77cc879fc82e8b71 -size 964776 +oid sha256:3df3c78d47ea3dc4e62d65102a7a11ec89240db6b70033e0ad4dd6d795c5f6d8 +size 805137 diff --git a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png index c8f27f3e01a84fedb999febb8e8ab4476262ba03..52b61c4555d1a14cdd45c6563c33d41d44d9649a 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f97bcb18d4293b52cb05054896a43a9a930bc0d783aabc4f86f97f771341bfe9 -size 885007 +oid sha256:e013568155e069286477f93eaca6e413bde2de587b6573a3dba5f92d5e93f132 +size 708222 diff --git 
a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png index 6baf845cb8c83b78322ab5bfc4f751905ec86db0..63b42b92363913228b8bfb14d221465b66890244 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7219db5e56aa988120568666e0c485deec4fb7be0701a68bcb4eae53944acb18 -size 1074884 +oid sha256:161c0a729c9eafb0346972ba79b198c22750c32676ecd82c5dbb48f5c2632907 +size 436972 diff --git a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png index ae8fb25393d55edb2b4d9d8ce14dbebca30c878e..56fb7cdcefd559105c5be251f199098790b4d8b5 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cfeab85988bf9748f4e52891cf3925acaa9662a31c6f32b27adb47f407f2a45 -size 1174652 +oid sha256:bf52c4d130a9f376ba5b25ca18080e288f9a9765a658feed40fbf48e9575720d +size 1749096 diff --git a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png index 7d91c5777aa9e1526f64346d6d37497edff6c53b..4889d1094243713130caa1c99e8355fee67f739a 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b7de451ac439a3c668e8b58bb1cdbe92f2f7a2e6fb1f1e4fe29662fc4b0f6f6 -size 1001481 +oid sha256:df5b30015548cbee66eef5105726376ff59d3c65ae38a5786d795f7766a20153 +size 1085840 diff --git a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png index d27bfeef4ec594afaa2c268efb0dfce272709725..713d027823fb6e760946c66e235d54eb2749d454 100644 --- a/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png +++ b/images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f0c36895e71180b42229c9f6bdd80385f8ad2b6dad933bdadd64af8f154fa93 -size 877876 +oid sha256:7ea0c441fa2160ac1308dfc588d4cac8dd20627c99da27c983ff75cc96e49c85 +size 1232186 diff --git a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png index fdbfcb2e2c1f2e40982900e9c5931c35e22f8285..6e445e4cd0118d94f4029959977ee55fd9f3d5ee 100644 --- a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png +++ b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa4ea18a0510eb23b24550db2242ff7141d9b88ae4d4519fb7e12882dc0575ca -size 788768 +oid sha256:365651334311b533fc18e262085e35df6e6922090125329bb545db67b87cf277 +size 1019870 diff --git 
a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png index 5f59a0a6388c9255d3f129513869805e6dc8446a..ce7bc0eafdc16a61b1db89b97bb31a68d145fc9e 100644 --- a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png +++ b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbf12388f8416e5af7a079802746b79535d86b26fead546763302ddabbf48fef -size 650733 +oid sha256:ab12cccf59f00b7d7ed6bab44cb307bccb1b82c15f94d622d7d739c5252c8d01 +size 646879 diff --git a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png index 204e936a157f6ab633c8c7e6b544b26914cf7b7d..d9858f47388b5f42713add2d124317770630a33e 100644 --- a/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png +++ b/images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:585f301c979bd7471ef72791b9bf41cc6ac1ddc4f2607af05d6604bb3252dec2 -size 486337 +oid sha256:e81f90d50b574453e84f9ea392bb59e97e1330c9652d367d59a1433093b0dff5 +size 1001850 diff --git a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png index 98a962c53b6586966051eeb2aa70fd0a3125876f..88a238885693d6f17003ee5dd7464700e64f9244 100644 --- a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png +++ b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a78cb1267a91f0903070c786a56d6589538e13153fe110c49471db4e61bb3ca -size 2262116 +oid sha256:4865f4e6840af89619241c5ead4027786f3c87e8241529a981effe9f3587905c +size 1076104 diff --git a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png index 1b55d07b35d820fcd1da80bdc112cfafdef70cee..08ad7e469fb09ff43c9ad9b1a462191e1698c27b 100644 --- a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png +++ b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e844560261cd5d33a2a80704fb6999c52860cd60526da4c2488c2e7e81ed913d -size 1453579 +oid sha256:c3aa5fe94161ffa0a8a67fd9cfe97c065aef6e04b35dd94430ec652df7a8cdbe +size 1800138 diff --git a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png index 7cef63ac0a64b55afa23d433f72f8340ab707dec..1cad5ecc58a9c5cce1d0020686197d2a97f800a9 100644 --- a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png +++ b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0117c62858dc7c5a18a9989d43ee3f6bfb29f3f0c944f84851f6443bd4dbc4b9 -size 2368508 +oid sha256:205ad4dc60eac0907644e8354dd1de4cfe639409f8b0636a044694382b3c4fee +size 1597932 diff --git 
a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png index 4326dc52e1e0cc869c3299dab4cdae89ba4fbc04..6fe6210182e2adbc7f1a5aacba2ade907614f911 100644 --- a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png +++ b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48d25593001f96c4dc139af87ed0321bf65502f15e19370fa02d66937277dddd -size 2407202 +oid sha256:a34877c688f6efedfec98105432dbe1a82f045dc0ba8b5b754c46f5e8c032777 +size 1674476 diff --git a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png index dd46750f061e8f16114b3d01e8cff9c7045139b6..6b37a43f22d515c32a56c625b6865e09a4c3dfd2 100644 --- a/images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png +++ b/images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7f16e680194a645105d59ac5d9ea162b3b7d74171158890126b254c9e706d70 -size 2259340 +oid sha256:adab798bfb5269dd68f55374541f339e089786f18c3b2a495612a67b7c0d64e5 +size 1438791 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png index baf1cfaf376d6d5d488693cf6baf74f0b40797e1..087d0c1a3766bcdeae67f98e9aa20aa4595825ec 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a69d3d16b44137d18960403610f4046c75d53731cc7a12ffb6b6986c125a7a4b -size 1734147 +oid sha256:694db0654a29989a3703a60e6dedddc019c819e6e264b25856940bd047dc83d1 +size 953115 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png index 6a9a5146aac75904278876311107c410d7c4e109..1f26411b422abc81ad62134d320418c716b691e7 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a23657a0cc18ed878e80baccf13d546229a507a7bde1276eaf458acb5988620 -size 848958 +oid sha256:72b4d555c18483d7252aa96e705c4b80f9e31d5b45751347636ff40c88f35b65 +size 952787 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png index 2a12d2aeb7e9e86a420a897c686af10eeeae6644..053b9d8cf7460f22ae2415169f8aaae236abd4f6 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cce2c8c6ee9519bfae9cbda84c0b2306b2f34d448a93cd6d03a07a404118fe8d -size 836991 +oid sha256:283578a9b44fc807942d7410fb88afd8966a883cf7019e94ec8418be96dc4094 +size 807048 diff --git 
a/images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png index 90d81389125d991776b706b59c1c320dbf651590..2342d45fa14ed8944eaf77876cea560098cd1be1 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5759e0c5668c39d1eeb38a9dbadd14f997eebfbac899500ac8e9508bcc680a76 -size 834736 +oid sha256:bc651d35ddd74f9eba6afbc735894a0c4a1a37522775218edfa540ef662162f2 +size 975108 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png index 5c750189932b0edb58fe7402d880c579d9e4b1e3..9ee00c9a3fde3da40279e40ebe0b4539b768d2a4 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00547a5902fad21feec26c32e0c07c7a4ea4f9d81b6863270344ddaf8c9d0c70 -size 789730 +oid sha256:5cd8ee24fea5a304ead888ea43a76d851a1020053772f75cadf2aa81d011252a +size 1047826 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png index f4a9e182384031155d5a36e999f4af216f0f86e7..4c6ba46d07dce471d711d64051fa89bce4046da3 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5f59c0f23cf5f1d608330dd74f54560a08ff00789f83ffc72bfdab24d2e79ef -size 774553 +oid sha256:052c06e32208e1065cf5092656225d78e4655f0354402b7ff9ef22af0e86eb4a +size 670666 diff --git a/images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png b/images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png index d6a25ce8fc57363c79285a5ed802f700c38fbaa6..0fb153559320db0ae111f457e1b94d77701768a5 100644 --- a/images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png +++ b/images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd153b130fcc76fa2e51aaab7ed00dea795fe291466f94b16b7689fa874c0031 -size 829227 +oid sha256:9768de43bd8db4a07849e46194775abaaedbde8366a849e96a5727e56e232371 +size 1000467 diff --git a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png index 70bf750e3ac479975876aec48d412b441995d5e5..b56887df14a0f1f11527be0efdeec0314d90d208 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f81c7e91bc087254921c5dae65e532c6abe93c920d5dcc1aec1434cc8f49ac0 -size 1162394 +oid sha256:0ce8d714fb83cd9ff2d881c2eecd95154712ef256aca48893dfd3bc044bb18e7 +size 922571 diff --git 
a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png index b6fbaa323d7658edb2ecf917032268bbc820e165..92bb52cbc3944f7e1441d3119e9d91c6b5e5f4f2 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f6ba252d9f374119d1459d8086a8ab7501041ff3a2b60c15c53d78edaafbecd -size 1115796 +oid sha256:5c78a474331b92f64793fbe4ad87329e41fb79a3a0f7326ecb3996bc2cd639a0 +size 1433854 diff --git a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png index a2ca0bbe2d768015eefc1033472e726353f28458..4eb5efc0bedaae1d0c49311192893b2847285f73 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:279bc57fe3f0c731dc2d472982443fe7dc03f840c6a47d17102c5b1b79a07985 -size 1159681 +oid sha256:a38a46ba9658107289b0c59569b326ee8dfa85d8212eb877b80456db7621fa3b +size 1072105 diff --git a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png index d9dd7d4213370f63d9f3a2e1328f36cfc3616848..f0721d894b9754f9a73abeb6b6c8f4158dbdd9e5 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:265b43e990ed2c4c54da15dfd1e503ac8845a7c817d7fa62ecfd85278739d719 -size 1165920 +oid sha256:3c8d180c5f15f066e97f6ad116806b93174144049427049b8b97a306d2837857 +size 1150146 diff --git a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png index 560af53cc7eeaa76d57ccf4c5ded57d511bab806..80dea43a767c0ec85fe57829856b894ae40ad4c6 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae89e68067820474d72ee42789fda9a3ee480755d3f1411d4ee72ad885294476 -size 1202236 +oid sha256:dce07d5cb482c0ac2f0308a44e775d7ccf12d06c2cdb190a677b2f7859003fd4 +size 1528704 diff --git a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png index 48d55e7a871dde44c30f404d7acd5efe7af48a3a..40a61888cfe5d14f70b71d5b686a262500bb6507 100644 --- a/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png +++ b/images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:976bc4f6c8168883cc9e39e4becc9f52fc68024796f52e2c25e2af127232a546 -size 1114915 +oid sha256:4302024c1e61d136a1482bd4a15ba742a8fce0b985c797ef03479854a8746cc5 +size 1664248 diff --git 
a/images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png b/images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png index 80c6959bdb39fe3163b77b03aabec090aa6e7a24..dc23eda23166d3ae11f3ff660cb0b80cb65f8f34 100644 --- a/images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png +++ b/images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c528de1d1a3aa77a6eefa3124774e040491d1a51ceb63ecd9de135dd4e0b6ff7 -size 661468 +oid sha256:df68cdb79a2f9fcf7cab9fcaa5071ac68367f96a1b0880602a4b8dd61cdb06a9 +size 712322 diff --git a/images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png b/images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png index 7e8741fee2d16a5daf1dda419f903c21f4ecd8c7..20785b828aba24e1ebb6dd30ae4b9c31c7ddb62c 100644 --- a/images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png +++ b/images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61771a0c3300926132087cd0a9ef0e701b23758c31d2dc7dac80175582dba96b -size 645746 +oid sha256:556ee0d4d68bc6ee2c9f64c47b503d32fa4f75bdedab5c824d11639f87f1182b +size 745965 diff --git a/images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png b/images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png index 0d2704afcea8abe77f0f4508bf59adbddff4927e..a3121995cd4ededc0b53c2ead5a807808e092819 100644 --- a/images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png +++ b/images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8932d2ccd10f5dc360a01e84e2f6922c741fa211f6d25489ee70f1aa5bec47d3 -size 495657 +oid sha256:25b0ec3ecf5f13e53ea8f0d986c19d2850c35b37176dcdca84b81b96fc724182 +size 693447 diff --git a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png index fedba2b6ff476ac0ce186d1e3831825b75a045a8..b5064d9ee0a42e1a4aa337976291cdabd6e87fe4 100644 --- a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png +++ b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8e455dcf1c4a9c42e13bea3fe9b56d8d0f739f223d0d70ebf13763d5cf38198 -size 265605 +oid sha256:5d45b29461bff80c17c391fe87c8ed16b6a553ee50fe6b3e8f4ca32e92d89a37 +size 326852 diff --git a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png index 4a6035254df7a9a7a02c712af6e9f48ee1f28fbb..db13648ff8385c178ecd7a6de453976dd9089434 100644 --- a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png +++ b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62dde7948249e95e0e8e689cb3fe5fce4faab934e6dc54bb77485f6cb7f0825f -size 353334 +oid sha256:ee377c0c6f9a11db4e77792c690404dfaed646b912c9497752dfcac30871fb43 +size 600897 diff --git 
a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png index e6a86133092071651aa2d2d14f23a63156f72b14..e600c51f1bea61f53ff82ffd4e6fcc05e25389ad 100644 --- a/images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png +++ b/images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7eb08c1ed04d204a8f6ca881008967c825f1ca4e8311077c66d0c76e85fe7d86 -size 360299 +oid sha256:8fe6b241dc544fc7e1dc84626a0858978bfa934af16cb7ccd50480110bdf25b1 +size 275813 diff --git a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png index a2c65f87873afca4854279444d5495806ba6ee6c..40811b6c6ee37c8bb42507a61f4829b314d1c1e3 100644 --- a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png +++ b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:490b1535781c7dbea441fe554020facbd434bf477cdf6f7ed81e99f7e661cee5 -size 1222014 +oid sha256:50f014e4f00e03304993180f94e279cf987fe9788b55631444b990d29cc48788 +size 1679442 diff --git a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png index 0c5ff17b6473278d269cd4f16541054553a017dc..dffa95ad982f0d5fb8a5a6b4406553fed6ba70c9 100644 --- a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png +++ b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ea8c12db7a3ef68aa8e8ad6943de81e719dd7d5c69170c27ba2379852e3f55c -size 1347724 +oid sha256:b6c4beb76daa44a05ca75d493a27779c7d07193bd35c128f0d59c0197ebf86f0 +size 1245712 diff --git a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png index a85801c0e84579b10cd81b982de926adac3a2e6e..cba4ee6483125356b07611d33fff403c13f1d5f1 100644 --- a/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png +++ b/images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20698c641f0db113a3614643edf24a9ae02061ea730489719caec90542bb1d76 -size 875792 +oid sha256:3e9f110aecc09b431b8f86049d614a19d52613a049022f9210fae1fe2bf4efd4 +size 1033445 diff --git a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png index 4753de5a570b63fabda8f46c5a47e0c880cdfe18..bc378669788658081e147be8585df2867e612aee 100644 --- a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png +++ b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca334ff782e32fbc3600b47e15904a171f3e4daa0a81e04b6eefb9e4d1ee3f26 -size 1440714 +oid sha256:35367b47255996c9d6efb95e948138c11c9ca7ef0556121dccee6df29eb62b42 +size 1818738 diff --git 
a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png index c9a7dd83b8a813efa54ea7a6fa4871c00bda022d..a64470c83918ab27f8dbdec32304e94de23fd64d 100644 --- a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png +++ b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20ee9158694d229d18e3e98876d4d3d7ae9c1e50c149317c45203fe6396f147d -size 327525 +oid sha256:dd1ba02f398e183a00d17349767f4171aa84efca5f98745dd866604863b4f014 +size 217108 diff --git a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png index 6328869e53cdda7bc3c334e0642a3b35ff123fc5..ff528f9170cc74c65092cf3e5c12036af14d767c 100644 --- a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png +++ b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:736d1bad75ae506570ff153e7c20d1049b693a7eab141439ec2fc5949a7c09c3 -size 486720 +oid sha256:3dd84b7e3dedae0c60412ed93dcb22ee5f726647b2a0e49878e8db6566ba4e15 +size 664525 diff --git a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png index 6554780449285367c4ee5890ec5c76982d568ddb..84095ed2f45fc9625d3176eb7e32e0f39bba9d36 100644 --- a/images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png +++ b/images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba200ff47d4acbaf48c1e337ba5ac6008d8857041473f9c6c9285e62baee2953 -size 1495243 +oid sha256:75072c8e09ecc5701dbab554e554dd66802cd556a3028a251f6fc17b2a7f31e8 +size 950999 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png index d53f6c2eb4988947aabb9fdc6d71b4d8984ad5c4..4cd13ab2ee6506270f6639b6a0e72df24ec2a865 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2696a461c750da5cad9c127310d04b829908effe7377006397278a33dbb8fa1 -size 402454 +oid sha256:242ec476a5fc2dbc5bb604c6aeb36d492b3fadf33b3ad20edec44c0c795386ba +size 396514 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png index 0fcc93c0fc6c77636a55679c871310abe2e83288..a2c4cbd4ad5c650d400852a872c76e7b6803146f 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7830b935f5583838ccbe4bf0ef277128c46d17738ca30e3af941265bd45de220 -size 680688 +oid sha256:933dfdc92b355f952483d20341885a7d9887e791e88ddfc95e5918e1cb9d078b +size 682350 diff --git 
a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png index a0de44de23dde5c5e1f507579ee484c25ff77e4f..a99fc23a14f753fd1c1aa7727c724b882e94bd83 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4359f66fd305f9dcab83ba74f2e5cca6476a47aa6fc37be682e3dd7e978ab7f -size 964235 +oid sha256:33d71b41921ab3c820bddc607deba075662f6a6a06aafd1902bca1ef58cc3aa6 +size 964586 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png index c2fed1761fc12d4f13b85524d88b8aa50fd440f0..9b2ec1fe5f1fc1a3e868d19789e24e7d47a5b9a9 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea6cbc1619ed4235028861c19719045f5ed8203a51d1a55ec4b688234a4dc4e5 -size 964354 +oid sha256:e27bd648d0541b6cde0699ab30ff226aa56e75d881977cc76c03c2650f0a90f4 +size 960875 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png index f31428115e80799e688773b5d65a7c28364777ff..40dd72d05d5b4b9059bcd8e376e31de51182239e 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bb591cb770a3c5b1334c5d4f5fb7e56843f3ef2280b34cec33f4ec154062b65 -size 559562 +oid sha256:189601183fda7413e0c1b27d841b043565430039c98bb5a38032ba42bf6df634 +size 498688 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png index ecb5ae5a30311cb3239f4b5e4809149f3ec02de2..471c64093a7780db2f057eecf42d1669f6296a67 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f07e914358762a7760027a89d187c7e559d936be34d52b55e140d52ffb54cace -size 652191 +oid sha256:4b461bd7eb713e21c6350f384fefb8532424f2456462bf5de2bb34f4d56c5782 +size 659156 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png index 7fb8563cb91f36d71b797e7694407a7420942c07..95986c1dec526aa97ec89c1c4f404351e1a15384 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21b692cc39cd43055ec1c3d993cf33ce0958177a9739f009327e955f43aa97a0 -size 901996 +oid sha256:22581a44321ffd607dd0e572026f80d3007a5fd31a42b9b2831571ee12ae5c87 +size 1291054 diff --git 
a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png index 1aeaa84d00aaad13280b924b7f89b408bfc375ca..d184907863664708022e91c4cd12f113f2395e03 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab92d4a521ddf96fd03a2b7b7d405e9c6b781c81e0d017cff4faf17a51ca9220 -size 400531 +oid sha256:fe0cd93766ce2e25bf263ccaf099649759e19c13ccda573a8b57eb8a6190e920 +size 385855 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png index c48b86546c01cd42f66ec8883fe3352df1016c81..021b09d3b592333d5aee36692d6a41c94712f00a 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d69050abb1cf655cca3ec3aede599d9a0d77d61742d936f9a049ee010b5d441 -size 948790 +oid sha256:123f1dd305493f6c6909d602e8aff6bc4e641eb0c2d5b9bec5c0be0af9bad67f +size 914505 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png index 4295192fb80be1b0573e3faf08e708ce944e29c8..74ebeba437b88376eb494175ee841e7105b073ee 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a1cfab9036c83869f7ed822839b8b3184d914102e39d99ce9f47bc12ba29632 -size 1018535 +oid sha256:80948a94a970c591be041966e707979e950c555be9a5272c95bcf55b0243f8c5 +size 1400903 diff --git a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png index 2c5db2e9aa57e00aea8c17f309b23d5dbf84ef63..51a3125c76d0b5c4f54ca35420019226ccfc22e4 100644 --- a/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png +++ b/images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d37bfa57ece06ed54d0a0d77aa3750853cfba443510fd29ff6ce88ff1a767263 -size 696611 +oid sha256:6c94056c5d929cbad0329ac7325526655518744571a8695e8413ca9978c81f3a +size 701387 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png index 1d3bd2f0506b2d4c4c19673d046a7ed62b33ad0f..3d0648a1780491f7bb01afaa18f13b18fa2ac559 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de3fcb395344224101e0d34986ec131bab0f8e346db625268cf99c61f75a85d5 -size 1003077 +oid sha256:14aa5c440c649a819852834a1aacfbaa82905a485382cf39c6e5c6545a5c9752 +size 1066643 diff --git 
a/images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png index fa69d8ff3eeee6976a497b4835d900bb1543d5bb..2bad7c2f3a800e15894a475ba02eabc5d239b276 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c47fdd7c8eb52fe6783c20f18e8b5edbd18582bd8f366e87abb776f998c20fc -size 734049 +oid sha256:ffcf7822958673f47cc41feaaf65e91e1003d96e5c91f74a2d60ed88c3bb44c2 +size 613010 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png index debd60e23084345ab05860bd6e3243073d37f4ec..93ef7841eff81ea3976ef3c91389808395835f2f 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d3d5d2f1cb9aec2b98deabe17d99d95e5743d1689070333f9097f2cab81353c -size 1002814 +oid sha256:87b7d0d26dbd61dfc754f53eb6f4ef4c0b5aff8b4d19b720f125affff8530721 +size 1022086 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png index c7bedc2a7e6a5c4a554e531a4a8513e2c7dc5aff..0b6a06c5901fa869f4055262f55037ab235a8cbc 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d3956e37fd6a08adcd6db9b5b54b402950726e2b7747231f7bf9822841e9d02 -size 1042665 +oid sha256:4a554d126fcbcb133f573c0522ff2d2cc37f5fede72021f4a8b97ffeaa77d15b +size 1578257 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png index 37d0af3129e24c3124635fe45c7cf8b686ba9ca4..3d805b5ef169dc21a8e7ab6b688731eb18f74f70 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68f0e7a57b2d439aab8572ef0f03c3b7b86d80e44f82396d221216a477afd299 -size 1016165 +oid sha256:84838cf3dad0e811f84f4ed048df5d137581e63dc9c7d8368f317992c03b8b55 +size 703584 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png index e210865c67db822b457331b8721e6f66b419fda6..8e52b0e5b069e08c0712c57baadaf15967d1da08 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4864a01ace0aa1f19ba9b92375f2893ad4dbbb765f5d9a71655a95289a343963 -size 999035 +oid sha256:aa91c05fffbfad75ad33f7c95871e1a90e7c0c588d14801dc32b0c11df827046 +size 551725 diff --git 
a/images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png index 91057f8dd19e4ea400b566cfbbb6470ec5b6ce42..0252c42b2fa91210202bc82eec4a8467a0f12036 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1b4cb199d2ad0f88465c06046fbdc97070e1a0dbaf0f6333947cac2c716d98e -size 642392 +oid sha256:99d3994574f403bed9c7eb55c9160e6dbab73b0536a0699d650392c02b9e7f86 +size 805313 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png index 28721707055525a8c0ff93d96c58fabee4b68480..16c45ffb5f1a82d5fddda0c810c77624272a4a98 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f391dfeb2d52fa1bdd377f26aad04b9aaa234b4edaa3410599b1dfb256441d92 -size 1158205 +oid sha256:9ce7b99dd4764a26aa0dc47e5dae11f65123a0bd8e9c0dc2a57e4990ba156bcd +size 1334966 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png index aead00f8725bcf09151126f4f56bbda89ba0fc99..a7018454dc14a3ac06ed34e985b7475c61ff1cf7 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5b1bbd27f6347357bedb300a2c6071f7aced80b88469ba9bd3b9441b909eff7 -size 1003496 +oid sha256:ac0fe02de111835b07bb2e46b25f732550b2b29d2ca3b4715a621255f6a425fb +size 1022045 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png index 26357c42dec69fe97b1f4698a6fd8d121a031742..f283c4a8dc06e0f801177f5a415065c3caa871f9 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:574bea44870dee4e827a35e0ef8e87edad2651f0164452bf394a42a4c2dfc32c -size 1066716 +oid sha256:702410763d6804ad0fa96ef36be8e8323aaa8269adda9608a4a082e5e8cebf48 +size 1081454 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png index 02c4909bf081cafc33168fb833ad20c3ca8b5a02..130b5d0e26e8cb97692b852232c489e30c63abe7 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2d8cff0259ae44e4387e48a82b74479a64379cd0ec34259603795359c1296c3 -size 1359023 +oid sha256:d6888c258c2d494a4a6fee871568f4ea9997fcbb1efba83b6c315e0bb9dab61d +size 563454 diff --git 
a/images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png index a1204d9c1dcdc6303a59f61e7155445a3b35f845..17c51d75284a7d2d330f3c81b12089094b1be5e0 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f747fd087329432eead877a0e8c496b4c55b60c2f7f410a725c8a810c8e11782 -size 1002970 +oid sha256:03269c98679349f3c3c48f8a4f30cd16019c149724724e46c5df8d1e80711726 +size 1197202 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png index e57ec91156e7e11be9e59b03774696e92c76f079..adfe0a89baef8f0d49235509fe8579518e0be379 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d49d0103d7cf0c054fcb21df8e1175a8094edfa993725208063e7ff5a7ed7296 -size 396664 +oid sha256:fab41c4eabca53bf1da90bc19ee64482f7416c0e439ec32ea0ff0c277ed20100 +size 396364 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png index 8b08fdf155d745b000f5638f266b68df01e1069f..9b8d77456c497e4ccab3c17304c4d5441d3c30a6 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed0dfe7f83ed88d5ee8a4329cd86ad2b9746d813faa3b677a53578b9d2e2a8a4 -size 971305 +oid sha256:aa2393f7417595ae2be35bd9d10303d6d424a0c97e23fd7e92c7a1ad86ecbd80 +size 747517 diff --git a/images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png b/images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png index 2c2703f39bea0409c53ae9df1b9be9ce667ec790..b1dec62fec5bcadd4a0c25c35f3ae90b412b68fa 100644 --- a/images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png +++ b/images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1814f4dabed89732acadc42fdebe6b757d1aed13be7fb68bf51da7af32046df -size 1208687 +oid sha256:35a91084b15ea95a7665c120bf5d08617ff211cdac3e4fb69cd88e8b854e456c +size 766918 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png index 9741527d4a3f67c9c7e0b370ded7aa1d6f8738a1..2e9a03be24ed625f35f8c2ca24442be4ba681d3d 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21f7badfac5f2f1c9e9ec0006f301a592b08631c396ea8f1c5ca34e9116f84e5 -size 973306 +oid sha256:036fc59b39af64e27ec6b1d62c8b83e5b34ae718af256a9766fde0728e57c866 +size 608292 diff --git 
a/images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png index ff35b77bf34955eeaa97cd88984616531ad271d5..671cf28ab51cf2fa35a0946fded94a901a3a3b82 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:023e23eb37eed6d2448a558e2e431625b21fd49e0bdeab1b9af840f4c95bebe9 -size 1070747 +oid sha256:9b0c7a29923c0d6b4e69f53045517531fc687deec72e1404c417bb9803902a5e +size 1094252 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png index 57549b2740e6589027293114ee8d8c971bedff24..dbe27ac220f549153c8d6df1dedc4b96157c1ac5 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9544dff4bb2eb6143342f4479cddbb736bb7f3a1590f3882717108008e5042a2 -size 1136332 +oid sha256:876ff0c889c8d5cd62224a82ba21b0afddeff1d7cea0135f2042ba2433f2801e +size 676156 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png index d5f6099f80f0d8e5f39a1901246850a9257ecaaa..3ce844375f25b8492b1f21f3d29b36566b784bd9 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9143ab64a0990bdbda11a651ab6d1e61c29d4e2f4fc5bef5c7173cc5f734d57 -size 2076129 +oid sha256:2548a50274585e7cb5127d7aabbc5fab03d73196f127c7d0066e64514571d672 +size 1307974 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png index 4fcb15c0026eba76e3c62f0667bfe3deb9e19f4c..81b07f85e98b52b8e0a3059026590b3e1a25099b 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13cbfb02842202e323ef8d84f6104844a352f7b180264469e70a4a5664ba8f72 -size 2067211 +oid sha256:4db22b933342b3848b15df97dfc5eea93f230fff0b3b2fd87e6ccb65a649e9e7 +size 1636531 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png index b6bd55ff4eed780943c3480143a7cc91f5ab83b5..d4d7802fd6295aaaf9f06c7ca325f651fa88004b 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e3c741e87058b7aa68af1fc549a85f4fd427a7c952b36eecf0be514cc5d4bc7 -size 1789317 +oid sha256:bef6a7f743dfcc90fbc21727c736893d0813f809ac288dba8713376ce023feec +size 1077900 diff --git 
a/images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png index 631e7110a3ed5604f96ae2596ea8c2f56c42dd49..512d9373ac727aed0b629c4bf5c8a88a7748347c 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:431796426ef865fdeabcc8ef1dc3fe638eadea90c6ea58d6fd6c410d464b5ac1 -size 1775450 +oid sha256:2eebd44a1bdf2ecd0cad108813a62013ab60b03d1fc034b36d955b958ee28d62 +size 1668410 diff --git a/images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png b/images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png index 075b2c19f2ed2714ad599e3b2b942f26c6cc1be6..8b3eff8a2c4dcd7e18466013ef8a6df6afdab9b4 100644 --- a/images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png +++ b/images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8075f0fe436991631fc4a30627435f55b78d6b5644ba527ec652efd5e033b3d5 -size 2057106 +oid sha256:6be63880894057aeb75904297ecfa5808f352cf52470f742aaa690bc068e4ea0 +size 1840639 diff --git a/images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png b/images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png index 1ee03e11e90dd9251e79044b59b41abee96c6b7b..c1c6f69ef7ff62021844721d18da170ac2203966 100644 --- a/images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png +++ b/images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f5a04ac87638e13a97345582555a70bebf590296d04be3ba5e36e3831d7dcef -size 389340 +oid sha256:43f684969b0c335d70f80bc72ad45702ff07c3ef744a8bdfdbb9947fd443b45a +size 372690 diff --git a/images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png b/images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png index a7ba2f833c4c9511b7a1bcf22cf1192fe510fb69..01ab5620d58b6e9a9a9a5228a69075fb92cb20e9 100644 --- a/images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png +++ b/images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9adfb9a84208b47caff087bb1852e206894eb45442f82ed354cd405ceab59db1 -size 350243 +oid sha256:412edff893e135f06dd0304b139ba8d54d3dd36c9c1816776db7e331901796e3 +size 344039 diff --git a/images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png b/images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png index b566927f03fe54c164551edbad2b3f5c24af62f5..a8c258cd807ca18fccc5b62b0304a127d469f8da 100644 --- a/images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png +++ b/images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95bbf351922ae717ae722fdd90881ce76a34f0183046d480743961a93faa637d -size 343532 +oid sha256:a0489c9b6f9359fa525d55f9c0cce0a3dab7dc373994d08bc165b9e9eb7e903b +size 292837 diff --git 
a/images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png b/images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png index 21f41f118e7158acef9d9236411e995301a48dd5..b51e2f81edec455bf35659a686fd6f18937fe748 100644 --- a/images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png +++ b/images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c4d4f72f0c5ea5e7a1470871da680fa241176a03a0ac9328f05751152a0553a -size 798941 +oid sha256:2dcd46df203380bc7bd8b97a9c33fe637adc176ff34e0cc0e533db383e60b7e6 +size 822459 diff --git a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png index 133fd59152510675d11b6e6ac5f42d420ae40d31..0a0fc288b131930eeab1c580850669796c47fb06 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:903b0fa5527859d0d47b47998fe7cbaf098c2fcfcdadab9b9e2fd3b39c680ab2 -size 519184 +oid sha256:a73c7081bc20614105a3f04af52a509a53cf16ae51286b60737259b297321846 +size 511902 diff --git a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png index 21c5d685eacfed16a8114c6d75582fc6e31f8dd8..8ff1767a2c40973bff59c50036e0bd982c55ac65 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1000f7e7cbb85e45052355d8818dc264161a45ac24887bc4f8e71c45239f8323 -size 1113701 +oid sha256:9858b4887331d97ee5c97cdbe19e6f60e1e1eca2f18363600308e19055f02432 +size 1217935 diff --git a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png index 9bf2f34add846a3fba480a3bd8e2b32d2b0579b5..338bb4c9a9b9a3b199b1970db4de433028b5bb74 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9fcdb74b7b105b42b0307cad2b286685d218ad163bf93b7b71bf79892595ac8 -size 1114552 +oid sha256:d087058c57d4a9c1b3224efb009a551ecabfb9cf2d576b46d5efb2ec95b76d46 +size 1141953 diff --git a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png index 6849c05f2e856135b46a5de50ca39a6101a4d008..f9da82f295c1a6520c1902fd811ed5bf89ec64a2 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e14361a56f24838fbedf867ead041f2ccf93698fd3d2a6ed1556d897d147412c -size 1976553 +oid sha256:daa0a4c4d1002b194f87c5604ae1d687cc1c98de64fda52171389ade62aaca7d +size 1502585 diff --git 
a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png index c435bfd7de2792d5b8c99f1faa39198360ecf9fa..fbd1dd058e13f1f829b6322969db7810755301bc 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:163b130d3ae2a692b011e67e1c34717594c8b1f824a739f8df5bf3948df2b7c8 -size 324093 +oid sha256:dbc0ed3a5813797d49aad39ff18957640aaa474eecdfa0a51d05b3b9973526b8 +size 766156 diff --git a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png index 83f63245bb8cbd247948c904a389121c17ce743d..acee5fcf08958d9e26f2a9ed65b33fe8d23e390c 100644 --- a/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png +++ b/images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a9a9c4b154ed13066c498510f775fe180f3e4a76689fbb2105707d90cc612bd -size 770946 +oid sha256:e32e42ebfd11f2c3614b00b4e6fa9dd74ca307ade703e66d2201c666f346b7a1 +size 316343 diff --git a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png index 05dee37df33869da18ff3ddd6a7c030a992612b4..653ff0a71e7ed56b99bc9976a59d48e798fc58bb 100644 --- a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png +++ b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6e96002955f5396b218aef7a1c9175b5cfd718d9f7b1848177c373fb3fb710f -size 1651733 +oid sha256:99ce401edee944d0759e67cba0bf4c3704c4500b626726f669f375160d3c368d +size 1836635 diff --git a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png index 9a692d791554b6a34fc8bff8d5d31ddb88598d81..494a129c89bcf60a7df2a79e666a1d9884fbfd7d 100644 --- a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png +++ b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da64af880872c1f143235f1d09607640d948beea899aa4f6787224b59d74adb7 -size 1458108 +oid sha256:e4953628d04e49562da76193f3e31f9ef60a7729e58139297040a7b8e6d5106c +size 1554569 diff --git a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png index 8d15cfcdb9f5abba1562e040ed6ac5f7e975df01..5b8a0474c084409162b2d2c9d813d49765acbe1f 100644 --- a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png +++ b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d3432c793e7bb13204e4615d5421e51317f5f25c207a909a2a436c2fbdb9035 -size 1275059 +oid sha256:7c89ce9d7f3e6228289fa5b7301e4138458443b9a7005ce998aa91377eee4804 +size 1504695 diff --git 
a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png index a8b1f34a7abcaa5076b13592d609200176cd8c11..359d0551786855ab281a9d3bdadd16956b4a8b37 100644 --- a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png +++ b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b179b0ed9074f282941c2b486659d29a19ef4db697bb8f7adc9198ac4e1a601d -size 996773 +oid sha256:62cf2da6256a92c5dc47d159249a93307b80e08e0fea4707865ea5f45acbde9d +size 1553056 diff --git a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png index e48342d0a8d95960593d8b17709868b3c4ddc3f4..785a08e134cc67167b49948a86e90f27167905f9 100644 --- a/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png +++ b/images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c6a85de021c469a7949081eb915480a66c0955c2185ff634e9677a283adb07f -size 629314 +oid sha256:432ea7616ce7d197082b6d2e66c45addba41f61aedbfe752914f3d187e6b1e8c +size 864987 diff --git a/images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png b/images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png index 7e77c6df01eb2e8fe46575fa5445ddedd933ba4c..3001af83ae3b65e5e779eef9267fc86c28d44b7e 100644 --- a/images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png +++ b/images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f69a689efce3117ab2d6a1692b4363b755f9367369668647c99bedd44dbc087f -size 1147472 +oid sha256:e8b1e1787f4ae1678fb7d444495b397109877506b8a1a7d72f2cbbd340f099ab +size 1391334 diff --git a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png index b466cbe2578cedeed0aa1af127dfc616f0958ba0..ccc52946c475f10e21c7708d0975614189d37984 100644 --- a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png +++ b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20beaad4acec8ad75045c3a96ede9d5318b2f3565e2f3e3a8f93f1748d264585 -size 6736344 +oid sha256:1eec9de83608532a38c600eb007ddc654d6d04fb536653ce729109963c238f4d +size 2611435 diff --git a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png index d125dd06ba2498c5533f8e16538d2f743a96b2eb..f2cbe1653de2973f14f9109a291104c678817616 100644 --- a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png +++ b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:779965cd64a495201b2e093308b160371ee5e9af8b17c3ce820b74501818e972 -size 3594815 +oid sha256:3c67c2c063b51e3bc269b34c55389dbad58c661c077a60818e6f6ab799d9b22b +size 2283174 diff --git 
a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png index 6c98a6c3a8c6f30fc5132c2c513ee3f336e49c99..2081d7bc981d4150391f01676bb047a22e9eb533 100644 --- a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png +++ b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbc332cc9ea439edbc3d7017b2ee181466a9cf598cd01de0bb80aca775e9e40e -size 5552050 +oid sha256:cf398edf0f3d9639045ed356338cc88b8f0e6634531775e62f7ef73a0656c704 +size 1024134 diff --git a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png index f6429ffed1d4bd8e28407593bb5a24cff8d45d56..be90b74e78ddf264b1e0ca6215dd545f03a77bbb 100644 --- a/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png +++ b/images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:551abca5167e6e3744bfc2cfc8d9f3cd829f1981337f0437e6c834297ddb6c45 -size 3303519 +oid sha256:a087d356fa6cb102502a6a208d466540d5746c715b873afa89d0945b951f2d14 +size 3034934 diff --git a/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png b/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png index 5b1f37979fa0e3e5ba8a39a2d07081214cd1b2cd..64a04c4f983bd110a5f0ed75bc1cb35970ab509d 100644 --- a/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png +++ b/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d87a4846e66815ab5897415165e1fb2efd0cb3178a5a636913f7c662ed037033 -size 770812 +oid sha256:512f68a291233ca31d7ca04041faac3596defa0afae33636e81e613705fd4a89 +size 399352 diff --git a/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png b/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png index 6a2d06466caa071609358870a52318808a727b3f..fa511fcb98d4c0a2a8f36ba1264078a55e2f510b 100644 --- a/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png +++ b/images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e9b636bf102ff97c4f72fc37ce8c1f8c80cce5c242438b2611e7eafd8de6fa4 -size 800891 +oid sha256:d10dbd5617a369573ee5d2891179ff0a1ca96a3fcf10d844c24e4993edc47444 +size 560090 diff --git a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png index ba02c8cf6d0b1ffc79cd0e5b1ab3f64880a2acab..499782fe66cdae5af82e5e2f55cde2a9916b1ff3 100644 --- a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png +++ b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:519740601b76cb381b68316036d7aa26102377938d7a5b6b5b6a50890c86e363 -size 516326 +oid sha256:c92cf0074dee5232d7a4ea26bd90851d2d6912b216a2384e0ff15e74c951ec6c +size 546078 diff --git 
a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png index fbc89d20893161eaba1a98d0695a89445160f99f..df96f208462bc8819e87a136986df822deff8d1a 100644 --- a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png +++ b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8326e391c2846a101c8c5076a288c57cb0714555068f75def76fcfc529abdff5 -size 729774 +oid sha256:e8905f3dac00a5482b88fdcbd80273d3854934b40e2df9a937c5c6df6d6a9f13 +size 1115358 diff --git a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png index e1e983a6909e901f543d8d924defa2ea1dd9ada3..4b8a811896f366004ad79c6c91b111a8b9d5f9d1 100644 --- a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png +++ b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1247f488e62033f5420aa39393823b0eeda8ffa8365a9a32d54316aa13fb52ed -size 629840 +oid sha256:68102cb25497e9069e7745bea8f23bf49a478ba59c8a533615bbc32b86cd1a2d +size 633545 diff --git a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png index 12145a0198403ed778135d5eeeb5ee4da1225abd..e620d6e4b8d505b8e5ebfa4f3508f6804597aa75 100644 --- a/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png +++ b/images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:995f96b779c8552c1f0ef0015ba991815d21161236828a4df1dde9d302e6a789 -size 656625 +oid sha256:a25337172ec51b50ddf301872858dd4e1fbc33485f2e3a09096f14eab8d7ea00 +size 1014247 diff --git a/images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png b/images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png index b8fc8077723513db1c9b35a59124b7949ef34dd4..6e2e2ac63621305aafb4ff06fbf8fc27594c35b6 100644 --- a/images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png +++ b/images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a95480a964468691b309327ecbd9fdcbfee6f8d5d823853f8efecc82ae520dd9 -size 664227 +oid sha256:ef9669f5d3a7033e35a213e4426a72b9a8549882bd3210630e23b1a7bc9b790f +size 598288 diff --git a/images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png b/images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png index 31e7b6e2533e4ece32092776ddeac776a02d122a..0a52ce78ee7577eab4d905f0d583d6cb1a44926d 100644 --- a/images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png +++ b/images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af0ea0c1ea725e38bad0552ee9243bf3df3b19ebc29e6a8ca8c66c242d271ab5 -size 785038 +oid sha256:ee1a0102c6c38f8ea7a39dd91019937412d3d3f90cd60899e87904b313ca6cd0 +size 593099 diff --git 
a/images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png b/images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png index d28d13749b5355bd06c6ca0e635b73d2d79565dc..c3554ecdc28a613106cdebb92aa35cd1ca3195af 100644 --- a/images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png +++ b/images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2c29081f59897c93895676e6616ef5e78d0464020d0aa26723df02955dc6c8c -size 1268483 +oid sha256:cf546852304213b8fcacef5998ae1178af18abc3dd5f387b47af6c7ff2317317 +size 968845 diff --git a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png index e8cc80e8545f1e4f29ab552906d9cf69b81a318b..ac0499230473e17395eff0f21d765c99a2918c87 100644 --- a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png +++ b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36552f7d930e592f974dc248c718a61fbb5fbd3648684b98d33336b7267312ce -size 456194 +oid sha256:d6761eb4f83bd5237faf12c6d7379fd9531feacb8b0578735b5b02fb5f0d11bd +size 601283 diff --git a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png index 56cf04b52892af117a5c8bf232d00bf829a37460..4e2497fd75500d83be1077c60f93e57d5efc4450 100644 --- a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png +++ b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86d20bc5c39b1180a7fd553bf32bf4a94bd7c5c1776f5d796a86cc0273efb30e -size 952813 +oid sha256:3805c3851d7a7937f15e0a95d372373a6754f434a9f264b77a308577280b9518 +size 509090 diff --git a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png index 978e009e6cd62d6b4b45984172943ab4a67ad0c0..818e1cfcb5917dfc40d631826679e62bb46a4086 100644 --- a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png +++ b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e7ead3b1fe507525e85cd9359b346307032115bf2bf135c0c74ec353240ad19 -size 471526 +oid sha256:f1358b3ea10cadb853fbcca5e2cebd100a67c76308decca48faba9fae9e1a6c8 +size 414796 diff --git a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png index 2a6bbb01c1e06112bcd634f4a32c770057b166ff..eea75d7fa2de141e5585b6bd30e3533a8f2bae91 100644 --- a/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png +++ b/images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea2b21fc6f91c6eff3aa4e51e6efe9349a0960a22f7a399bd6c37156b8b6fadf -size 1071298 +oid sha256:7a8a64b005b7ef35e1142dd5cac82ffeb53f978793377dd804af54b86037029d +size 540561 diff --git 
a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png index eb2b52467ccc4acb2756365c92bb11887e94d513..50264a5d5c1107706ac854bceee5d2aa5ac6bb30 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8360b1593b64cc5b56c19f231aae441415f885abb0d7bbcd30a5ebe1f5d784d8 -size 921894 +oid sha256:46cf5b1c310e22046114757ad8f47aa3b555d39c6048f384315d0a2c8a3f2f7a +size 988472 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png index 81874d859c8e25f08da9798713763e94b671965c..385e160bab7ec60b52798fba42c13429e1390738 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09dfaa1c80998163f4d790d573e3191d82e039c132ce48267347217ea8ef85ee -size 992464 +oid sha256:0a0cbed66b012375662f78a086710a8357daa419aadf5efc90d0acdce602755e +size 959253 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png index b82139bb3e85f3be17563dcc1009485b350b29a5..8631fe39fe41e7d0416756ecc60d095d98d5e440 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bd5509dde18e19e52363075d1197e68d953e77b3913c045237cdb9272aeff5d -size 925285 +oid sha256:aa209f7bba367bd8669e4430a3093f125d31c42f517ef095c4e47d5276ed44a2 +size 926041 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png index 91db0753b6063383e135dcb790c8834f5e82cdda..96c00e86205483abc339b20da1a4df9ee66552e4 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4dcee812dd6d087ca7f19ab4fa0d2bef0c8e90dccbd1ff738d5e594ea1e7301f -size 983061 +oid sha256:63a76c12851e37c464f74aa7640cae665c78ef8b4b6b0ffadae021ae5e28da87 +size 1106548 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png index d09f3d5d6a8069acd05cdcd622213092f0a49d45..b2016f9f0153296f0663c2d5a9ffb4633238db46 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f58f2fb4c96960e83edc90229f24bfbc1908d5c29a632f34c0f4c9b3d2c782f -size 988115 +oid sha256:83c619b799ff7b415e1bfe65196c16bb2fc65f83181371dd5c31a17f5083c2af +size 1173790 diff --git 
a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png index da31252008289911833e2583e03b065ae844029d..95fad6db0b63e4fb4798a8e47bf3207ee576e9cf 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e3ba48e22b317a151b962e21113dfd1eb60060b3c7e08a32845fcc253e1e1f0 -size 977592 +oid sha256:1d20f597f0e50fa7a0700a92a6d6c78779a6903c1b1d65f42f5b9647237ec837 +size 828912 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png index e2e29b1526e7619b7e1c2314f6ecd0ade7076b12..c3806ff96272aa2785e271232f0b4471ca856c63 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c86d7e3a355376a1e4453730b9bcf28512cafad24819e0048bf3949cc594635b -size 972823 +oid sha256:fac41dfdbe646df113a4e6161bf58df851cf64634d6b830009ee78d63eb5cd82 +size 1205742 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png index 0ee3842032134c1b0a74cf9d48ca34d1e975f083..238c29c56ae68a72713c5b1abe6e162775223379 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:436c7ef4f57bffdebb220725b18af4e2a34ec91d22052bce3f865f74d483f432 -size 973443 +oid sha256:3bf03d8d4c84062c1642aa2dea03473fadae13a9d80e75a81b2842641092b762 +size 1161185 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png index 81ac7c675f4b19b8e6f824327cef9e109d025bc4..b92849191b82a262b33bb5a663af8e2ca9a7748f 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:602c154e1e0fb3322aebbf1b52a75d3376692078dfc3ff75158963242f263bbd -size 967949 +oid sha256:7cd9618e68caaf9d3f6478f488aa020a26f1d30a3f6f8fbb4e3bbc3868339307 +size 1181803 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png index f4fc94a6fb7f0be85386d505d261f1e54054e873..842ca6a9da9b0bee9d1b64f31393ee20a77ba6c3 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54a9fe5191f17fbd45ae0dabfc0bf5b6e4fc424b4f2e30035525021bb53dede7 -size 973390 +oid sha256:5fbda3156a99c47f57022c1bfa1773e1503b389280874edc9267c183d3e3cb93 +size 1161998 diff --git 
a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png index 8daa88a1ded606945112b16f59d640b58296ef93..d5c891a8078575d337c2756349e2f9feec96eb09 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:647ff14952f32c729aff4ee5c2d53b24fd0a12808149bb64f1dfe84e3c405ab9 -size 987743 +oid sha256:fb7cfdc55ab34fb0e818acb41c9d140257bfada4470d2dab5979b879856aaea8 +size 742951 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png index eb47dc5b2972971e54b7213a77d72b58cdc69337..89bd35f1c08e4899297dcf3c6e06a804bd346f1e 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87adb152460cab7406c3762ce70d837db98671ed073975f88f9819e657fb8eb2 -size 960184 +oid sha256:bebf965e5a16ed8401c33869e1579b04b030e887bfa933d742be2bf19bc8f73a +size 1138713 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png index d95db96bcd78e96bcb44682d61a90f5bad235487..44cb164682309f4833779fb2091e3b251dcba3b5 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:860e57bebc6162b6916f7b9da654a5eb826d11f3b4577d0bd28cb76d92389ac5 -size 978044 +oid sha256:61a3cfac67b9e3317e4a4f9baedc5c8960b6c910fada7136408cc95c22d837e1 +size 1211565 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png index a562fa8db450e8703ae7772d4dfc95d8e3910b3c..4aaf0263ea99500eee4d46e53ab844043ef6eb42 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9dd812522be90db5c26103ac6d127298470b074023740904b07ada5779a93db -size 984703 +oid sha256:6e443828e989b01b2fd39f3f70b50dbfb7baa67a93dce0293c72364c1ad3b548 +size 1255247 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png index 7595bfb06e63e92adfa11e865535ce9d34e2842e..0f0762e51d7a0c8760b2fd6e9153f454808efef9 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ce9bb5475a6d8af0f53170995b5bc7a3b819407e5eaae904be1c93839539464 -size 948876 +oid sha256:00f61934f8b6d112193139ac8d226fa5b5584c089a099a1429d21a1443b814e7 +size 1199834 diff --git 
a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png index 15b39698c11500cf6ad142c1ed62e0953059e1b2..31480997f9f8f255a87f27a6e3bea4ed6be19ab6 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0003276b8b5252bc81f7cf102dfbfb087731e8f1684948af4fe7079e0abb5fd0 -size 972033 +oid sha256:a76af2c40a7014908a57248b5b1bdf48d2bb1d2f71df353b02f7d272293e1009 +size 1161240 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png index 3c3936c4c72b5bc13bd658056340b7f7a7e80310..f79fe80598bd0edd7c315b829a70cad956be8ebc 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:539d3f10216cc326e32fc856b8493881ceb7885d8f35c63753562492e9c4a762 -size 917175 +oid sha256:da9e313a148207bb6efa9c5d89cdb33b1a873d2cb2803c7f1dde334b58ca0570 +size 834344 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png index b93d623e2c1f1d22e17375ba19aeae14ec50637a..20de908573bdb70df0bd90b574453b00a356d59c 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46a3ece2d32e2c2c21f1246c0f00351c46faa129d47df00d3f162ed81780f326 -size 973396 +oid sha256:7bea79fbef6726919945b972e7cab4265faed3093b8576857e8a47cda022c53d +size 974316 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png index 69b354e11db86224481cea6015ffebcfd4c5311c..526ff0c2fc68f7976efea682444c1f9a04b972b3 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc4c5570bc4b124eb80f3b8d153e9ff4b30045445436ca741955544a9dc20709 -size 968184 +oid sha256:27648f6331c8878bec3c4d79fa66b6e623f2516376f18319cd52cb5f96412203 +size 1071269 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png index 6e2a2372e6f00474ecc07102f727b7f10b589e8b..3103f77838b919ba56a8f0524c55d02b18926cae 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2ae63e370e56710d379f18889c0b920dd1f4958451dde71429a09b2e535dca1 -size 1008205 +oid sha256:7fd50bb543733de93a9dc635b28f22e55d9812b9c027a5eac2fe31c80942a2f1 +size 1158302 diff --git 
a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png index a628c7a8508f584b00ed360767f6663faed8a80d..f74847f5ae0f431b0c188213fe06431fa6fe4792 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e7cb12d61d2225062ad2bcc8e2b5b29723d46a651b92588f863b5495661722a -size 946930 +oid sha256:12f4691381f64999282636b60eb9a07edaa125b4c997acbfa546d4369dfdb731 +size 1116515 diff --git a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png index 4d74db919652f449da0bb4c96fd5e29bd4765216..468363626a25413e1901dc66952f4a7bf8048ccc 100644 --- a/images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png +++ b/images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04fdb1d5a30ba7f928a546539ddb204f3e40852ac307eb4d9b83e1db91aa4e3c -size 973039 +oid sha256:4a8592d8385bd74946c05d6e5d495e07a8a235740a2cffc57aad387c09a7eb0f +size 909208 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png index ad823988b9dc547185c312fcc729b9cb941adb92..714c647c93abea0333e81d44378304ae0e8be066 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:256795830e9a5948fb3c41b4180cfa2d140383e96bbc564466e537ac50d8dd4c -size 2704686 +oid sha256:ab87249699286ef697f25d6796bcea885d4c72279405680b3a38706ee550f04e +size 1205017 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png index 8e9ce0c6b61e3712412f45b3724a7519bbc4d0f2..060c99939ef447f08e75d1f31dbd084e7d7b7dea 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8171ea0a39e29295e49c3863028a0f161dffb83baefd50914c2c1a220e7febc8 -size 1187911 +oid sha256:555b5be0951dd86f1959553da41ba6e8c180cf35e7bdfcb997cbcd55cb6cec99 +size 1164559 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png index 7ab14e6eb9ea28a046fe2cd10e78aeb370d30fa2..e4bc7a8b07b7ee482787277f201764ea0cc19cef 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90ef9a861042395e987cd6e213a93cf22a9f78d57791f0208d26a8bbb8451149 -size 1159515 +oid sha256:69c3f33431645a8cdfe131253b683e9aa6d8bcc41a9a0582f78c8a92be3ab18d +size 1455532 diff --git 
a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png index 63740f1837ef15e19410928f838763d6c4c2d8c7..050f3e7a2459f5df83f9148a1974f65b6a458748 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e29d2455e8d0e2ac5d85c95d553e144b3ee0dc7030561018956396018b6ddf85 -size 2522000 +oid sha256:ca59ed0aba3108db50788ad748807f1b3eda66031f52477502bed9e947f4d123 +size 1303700 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png index dd0f44f3291b126d4abee4ebf28ee74e60c68705..da5aa4a276c01a0f2cb806b88d9dcba99b225011 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07ac1f89eac32fdc0c33a5d539b9fc92718b23b13156b04f60c5271d6cda47d7 -size 2782470 +oid sha256:f123fe5dc157d478baf7eec99809b3a6399cd16c29cb25e1e3ef78bf4b89bc18 +size 1176103 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png index 987fadcbec3eb12790de9fae043f0e072e8cee70..531eec5e470560e4abbb7ec148f605a3228d294d 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f21484962003c17eb6f0fb44631498743dcf275ff98629e06153fac2df985f5b -size 903133 +oid sha256:495e0bf57a576ad29db15c6ee4186c8b9a03549a1fd1d9cabecacbea22d9769c +size 843645 diff --git a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png index ced55055cd3157b9f074e2458502ed0bae0f902c..56e2d6231659c2d03def754934927cefcbce3ffb 100644 --- a/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png +++ b/images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c43148032ba0f19267d23961bf3506daa4460a06bc86354b3a23d0974ccc6e86 -size 854744 +oid sha256:c83b361d9cc9a3a1451eb6679e4ca32249265ce18612e4b03f281828b0210119 +size 1276223 diff --git a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png index 466b15b09bb81bc68b80d4a27619ba16fd4ca2cc..d1d9e3500d736d3d0fd4c8fcfbbe9f1c940e7dbc 100644 --- a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png +++ b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4fac10fa8e047f13a198e4216ea78dd936671fc8db1496dd96b6387c1bd1e91 -size 771085 +oid sha256:8d3c5fad36959c1e545e7b2fabf639f4fbef8b74e01b52cc9dbecf82355e49c2 +size 895185 diff --git 
a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png index 5cef81f2fb87c16fd9296ceddf77f31e84f78ebd..df74b1d2d9dbdcb7b0aefc1240a5786cdee108cd 100644 --- a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png +++ b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25326a73588354b5fa4a5ca9adc516d38874da2f22d64eea42b100acb8030df8 -size 1776290 +oid sha256:88758a54e54cbb60e096ed535049e2eadc51f418230eaf1229c1207727fc4b2f +size 1683681 diff --git a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png index 4109dbcb3acae3e9b61e7a8c014695b1f8da166f..49d8eeaf723bed24c5ae5d768d48c771bb458804 100644 --- a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png +++ b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e7906c66b1e6aa2b74b1ad5c894ea5528aa7f30c0bddaa021cb5789d1d357da -size 349642 +oid sha256:e21c1e2fe996aff9bcbcd5ff6b4b12c6924e73fec61a89b7d2784793b2eb5361 +size 350067 diff --git a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png index 7ba6437c73bfb436c9541be4e125e2aee5df3ff3..d0b14c53581740afeb9681c1b03dcc13118b7c79 100644 --- a/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png +++ b/images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3b4e5fb85add9fe4b8bcc680c6c8a5611bddb7f359888756114b404b71f7a24 -size 258120 +oid sha256:7a5e827befb74a212d2c56cff3ce5a0ac5dcf2bac1dc6e3e83cf226017f68cdb +size 456823 diff --git a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png index a5043ca839f2093e3f2319a8b6e24568c1f3937c..b650ccbbaabab1008cff055398222c5bcc8c995a 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3ffe76e2717e3e8b0aa8188f2dd9ad061f3e9b3d6dca0270abf15e19865494e -size 1151667 +oid sha256:485a9b090e09c098750df06a73ba17341ccc0612086f68ae7b67211d9a101701 +size 2385362 diff --git a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png index cfbe7d0ca43989405eb9aefccf750f9e3b501b03..6fc8043699b0a773d462653264d0fa3937b46d87 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5829f510543ddcecbd3eef21414ea4cea203d0b68896b78ee15c5a001ff03aad -size 1167719 +oid sha256:5dc549488a49869c4230a6e9a248e32ce73c5ced0d7c73f555a83044a288d898 +size 879567 diff --git 
a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png index fe8301616638004d4ca5b2a0a4c2ba931ab84538..a4bf2cdb96ea363fe98368dc720a13a937dd6065 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3265d7a858c1dddbb53cd187b05d0e06f3ca96d2c35e072b2ab4679f82c1771b -size 1256386 +oid sha256:c0fcc8cd2aa6dedb25b81b19940d1aaf674acce32674d6edadb2f97ca1adb9a2 +size 2372253 diff --git a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png index 5f9f55a42055447855d154d1f1108b1caa700b1e..528d6d8a6ff4ed6d4ef3781329209ddf89121363 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c006e7e9160c539723f3e38e08a0f17c6afa0d53007cfa6d6be093bd985e83eb -size 1118154 +oid sha256:dd0245fc7d296a0e7d4a190dfb8e0ccd334bedc309da833857fb881089881557 +size 1650182 diff --git a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png index edd34d1ce6aadbb963117ca8751296fd3dd4e34e..fd8eedcf63e91a3cb1e59e8362f9be59d1e7bc76 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7593b3f2ccca1e6329b8d1cbe566c41928189095b494c21ccb2b808e0454b417 -size 1263771 +oid sha256:f7f597b61a0ba14ea4a1b14f6ff4a3ab826163b6ccd1d0bcb78b151f10ebb2e5 +size 856771 diff --git a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png index 35b04714ea6b6f150685f3c62fba0f124e5cc5b1..fe5f51d6256af7b992f7cac25b048b0d35ec73c2 100644 --- a/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png +++ b/images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0ec10cdb687035899697bf529e18bc46b054c4b7d393f37d454175ba7cc90a4 -size 1194604 +oid sha256:672df3ce46451a9859596811c0dcfa6034bbfe20b22559160bb0ed7a59d4f7be +size 1096940 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png index 2109efa01252b0d8fdbf1ec119cae4a54af1a0bd..94b0212053e4cbf859ba7c1fe92a242b48670e74 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43a558cf072e79469ae749909d10b50c67d2aa90e34ecdb2552dddc2b7969bf8 -size 1105723 +oid sha256:9dd2d6eb42d76ea5ffdf33f6a563f0146f855d5c86a7b90cf2bfadeb6a996094 +size 873609 diff --git 
a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png index 41f8bef0dc63ce5db2afdbba64a593b3ffeed23c..1bde80e214305ac4aebe33f6bed8e884b6c2b0c2 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bc78a17d4c8c64737e88114634b57b7ad5551338dff0aaf8348ce082bc354a0 -size 1008846 +oid sha256:e4ff47630b3963e3bf22bf0e564296a2a05ec4c072d5b08356ec59738dfbb57c +size 1165919 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png index 94956bd8abd65588f2486961aa5e4b4d420bb592..4e2e164fc8f85f75eb53df4e2056a85e831c9760 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05caae50045f9b437ad391a3a72ecda0713ee007de87cd48d900d6826f17707b -size 1098821 +oid sha256:513d118e69819f12ced19ef168949bc7661bf05999dd7518628bce186b1297f6 +size 1480423 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png index 840da881f0dbcb52597a6483e6105c3d8de1757f..fd9fac7e9039b84e14b2e0a5876a2488d68970db 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d5051882e8dfa91f69264fd18991626700dfd178643808a14a655aac90a31e0 -size 1567700 +oid sha256:bada6c72e06f4e93d4b557bc482306609384b3696fbd82ef1d9d90bb6aaab75c +size 1161146 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png index 3f82e98cfb7dd8f563c2a41eb96623bfe7f8dfd4..c0925ece183d3f99267005b2c1c5f73f69cd6f8f 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84eaf700d4c93356e9448c9e3b196cb4224ede59bcf5279c7445d4e9d664dcec -size 1658798 +oid sha256:207a4d84dbf6dc0f4bccb4b312a55671446fc8ba1f73824713ddb698213d3173 +size 1552309 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png index fdcac90ba94abd54a9f39a685015a09b2cb72467..a2fa2d691769953eff0a7c05bfeb83ad9f6b7b4a 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e69dc99b00315d3c415d0d01ed2918ff410fd33cfa38d952f6be8f99a3d34122 -size 768876 +oid sha256:3ad31e6ef6071a82564e72fb6946955f844761035ebdf6dd9c1e711a76f2883f +size 1141946 diff --git 
a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png index 031c72f69e8ddc212716dfd7838028cf8e7461c7..d8f27cde6c56db70d1c074075ada1830c0430f10 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c2b92599c4c92187264ef673f05e104351363716f83fa5a92cf9f23a2ab2106 -size 1719251 +oid sha256:11dcf5a4bee167626a495a2efadc1f4b4b73513ea599a7989022d3159cfc8e47 +size 727406 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png index 82c48dc280150ebe3924461a60c8968acd0ec326..bf5be72bb62abe04c0c95ce2e38ce4f2ff7e4738 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b96065fee81e8118cef11d4e568faa168ee2ca83ca1f54525859bb21d35f30ac -size 1277249 +oid sha256:0028f8c3b0d95e0b2b6d1287102faed8800cc2f32e6a7659ada850c89c07a069 +size 1124060 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png index a4835cb72e1b2ef2c55dd9c83cdd92eda3ac966d..597a939dc7ac76066b5be80a139de00bdc16becf 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4af0c62066cb969c90c2440952211a96f69d11f072b2b3a0666cc00b0f2650cd -size 1628882 +oid sha256:f246bef76d9d41e1e78ef2e6a3860466a09ac0f610dba3efe728dd059bc32646 +size 1056338 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png index dacbb4b318804d3771e5bab4c9c391f7ebf60d8a..a67d61fcbe39d0914b82441fc844df32d8b15935 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7b98a60563f0bdeb5a232845144dbde4094f2baad2a31cede56510e00efa0fe -size 944942 +oid sha256:ed031b141e64da69b3906867f2372fdcbe3f73195a546273ba324551ad11437d +size 1081563 diff --git a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png index 9bc60cc833a8daa6ae7600cbf3c1577778377405..6f69e574298bb45ae615b6efabf10300a15a47ec 100644 --- a/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png +++ b/images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:522c73176376c8d51fa11b77f21dba2afa539df14fa655d49d92943340c814f9 -size 951581 +oid sha256:633dd85fef13ccd374a4d46c2748f61c58c5281a377b063e0006f0aa0d9ff408 +size 993020 diff --git 
a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png index 0bcb7827037700b5b45fb901be51d6f176496732..08ee2cae653941b32d5816a1e611c970d1288be1 100644 --- a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png +++ b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf67bba78e73e352a6b1ce93a19d693773b3137762df5c3e1066fc7d8677d35b -size 936687 +oid sha256:27f11be72811208d0e0ae9419b51909c48d100f1aaedd4e650b681fc5e735621 +size 1398601 diff --git a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png index 0e2f6dd3cf3512a9e54a7580a42174640b774cb9..183d85ff36016cb8fb75a1eed9e1ddb6556ffd59 100644 --- a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png +++ b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:830b75dbff65c0f0528005597cf69cf73a807bd7aec4aa2090efd33d283e63d2 -size 1257773 +oid sha256:0e72f24219609f6863a51f4ea184ca333939de51f1481a6b8d87ba7d15e22e0f +size 939494 diff --git a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png index b11469f3cb0121eea0e50845d13c2850bde4416b..80c57aa735e258e7fe5b27dc321b8300f166d13e 100644 --- a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png +++ b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cde6aebd022492e855c3db085beb54a3b0d80cd5665c8d4d3f0b4aa56c861ba2 -size 1257770 +oid sha256:46c12910e45480f60b1cd00c1a653677aa02e703e8cd9f4e3d47aecc8a993f34 +size 1001422 diff --git a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png index 2b8980792e289f5572aa37169702448bd15ab589..c0ea5d0a0d36bcf364c4ecd687e764a9c53aa049 100644 --- a/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png +++ b/images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07062fd86300673e18b4f6376484258107401c4e1ae1ee8ece8f142341b9e7e3 -size 1265513 +oid sha256:a070558e27c3203a2d20de05d5031c81eee9a7f767acd6b5a263a1e056c66034 +size 1221329 diff --git a/images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png b/images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png index feb4d0129728af238eeec7a4e436910b6e32a4f3..574e3d87695e248c36865a06ca69a6cb784d28c6 100644 --- a/images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png +++ b/images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53cb1ac01ee79936dda7fae98d5364906dce5a8f13439fc44491e55031405453 -size 1231630 +oid sha256:b668b7ed19903967ac4e97cd1437f665ad77c49041c626f93020f21b773d1ce4 +size 1486199 diff --git 
a/images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png b/images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png index 96d01802723fd0521c9458f918db91dbf2d34c7b..96271ec2049ff5d247586756fd29fa87bde41bbf 100644 --- a/images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png +++ b/images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c612b4726dc908c9bc4887c53cf4954bdbc20033e299d0cf423b3831837a8cb -size 930253 +oid sha256:28943a1adda80589f7705af99cfefc2b93ab4c091c74e6f2c4e9cf2f6a434e9e +size 691398 diff --git a/images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png b/images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png index a2ce0261a5616547c631dac767f372ee3560a22e..2b02272dd99f337ee654150a633a48465f2344a1 100644 --- a/images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png +++ b/images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fefc5291589a08425cbc559db436349727d20679d5f44fe265dfec1db3e18c60 -size 1696614 +oid sha256:e8615cd98fd42bb71bc97cee593e1d5654688e77094bf491bb48596d054549b5 +size 1625411 diff --git a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png index a6bcd43e8809f386cea03359300980929365391e..d805398841742d60b9dfbfd0986df9f50292b10a 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd6d3f7cb73616b3bdc73da3f91714459ec75b0c349ceeb4a2e18f3a827db089 -size 1150540 +oid sha256:b1a4922203a02d35cdf3acd13a04584fc95e0fab6d6bb0000dad769d9384f743 +size 729715 diff --git a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png index 074cbcd432e348e89b3f22b06503b418be49b5e6..09bfdb6925bd46443f17a68e0f72faebb229f755 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0543a9b5bcca5bb0d002d30fc4c1e48e2b30c43d4f0d6ae634f51f3154e743f -size 1098749 +oid sha256:db2619de6da3e828119508858a37608ac94369bd2a197ecc327771d603f3d2ea +size 1114819 diff --git a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png index 11c087b4406bdada8bec5ead9ab8ba28a58414d7..7f04009159113c193cec45c59e69decad9666850 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf242fe1d28ba6b56627aedd3a06dab8594f85d1b3877492eace2a4f4f749bc6 -size 1547232 +oid sha256:c9918a170a18483717727042fb3948085819df239ce7b4621398409191049084 +size 1578485 diff --git 
a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png index 9cb3e17e7238150674980b6b5e5f58ad67f7e455..e8b67b35a6983ac6c74d372cbb21d7bd60e329f1 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b934c460f7b5042da6a4e8f58a64a0e5c2e0c1cd9d5f180135586d69719b772 -size 1098019 +oid sha256:4ea6960ad456556dbbc64be54407710e5f9948c2f21e35c8849c51e183125649 +size 1019205 diff --git a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png index a093052abf75ccc7e6970309c09a82de6c8678c5..ec6552bb1b4b39e5a2decb2c3aba3303726023c6 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0d617bb61e90738e955f2fa2bceb64e7748ca635359709e0e0fcf0744d485a7 -size 1096882 +oid sha256:f814a3bc825211cb51b6a2c17b6597e8ff7f072577ae5a1ea127d5abf6525df5 +size 1136003 diff --git a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png index c41dc637b2e13c8315102735e4953be2da4dc390..7a548a1b38137715073d70a0c477c5997a9fef93 100644 --- a/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png +++ b/images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04f035db9f59ec449238352d2caa205e9e4052182059f37fcecebb8b38f107af -size 1523778 +oid sha256:00e68c37f77100cc31bfa4a66e2e7b7b1086a15984003e15ef3132d98687d4f4 +size 1632888 diff --git a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png index 495b05ebca61c7aaea40d0453c618512a81696f3..67ccba0a33e9b0c889f9e6958f26d6ee8d3f2ac7 100644 --- a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png +++ b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd6838d98ca1df72bce67424646c6d7d12015e52aa861452b798f36c1b4b525c -size 1505153 +oid sha256:e1fc1f57250a96e71796bb1c8db53280973727b9c4b455a6bc009ddf85d3fd19 +size 1728402 diff --git a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png index 99dd3e79b3f6b9fa0dfa01437aebaea564a5ffb7..4847556b545c78e36730c97e693d33079b74f67e 100644 --- a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png +++ b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7830b3ddb4509428026ee84ec27fddf7a14dd85eb1767fd35c2a675d8ea610f -size 1521529 +oid sha256:6f29023937d1be2d4e80937fc8f95c4a97d7e1f05897ec3b72672a8005a2f9ab +size 1691600 diff --git 
a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png index e1c4ac2ad237d5dda7df709cec8dd0e296db9005..a544d65d7327501b1a3fa41ee030cd7c1d9d6951 100644 --- a/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png +++ b/images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5dd166c0d78e4fd7d1c23d0baa6a9284bfca2cd7ab47a3eff909723f6e1b3bde -size 1519912 +oid sha256:0ca46279b4a0ff5a3ab98603b5f3f73cf09228eafb7e0c9e1610fd38d21f9bc5 +size 1504238 diff --git a/images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png b/images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png index c8de266dcb890d3ab075f1e0d23066ccbdd53641..15eb1dd418ffa5a3a523ca2d672d8f4f3b052db1 100644 --- a/images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png +++ b/images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dae4e821c74155ec5ede39b4869c7a923092df0723ba7a4b9cc7f863debed375 -size 920459 +oid sha256:0d905b626a1678ef0aec0442beeae8259aa4781b417356e74cfd98144c01b72d +size 1333490 diff --git a/images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png b/images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png index b9827003445c75c2d48678bfd78c2345d7a475ae..b0b9e751c7ad3208c3297afad93dcfbaad90b9e8 100644 --- a/images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png +++ b/images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:713b22ab2f3a6357c2b910b560cedc9bb66ea2e036d66a5b788ffe0cd73270ce -size 1210103 +oid sha256:0dcfbaac176a21ac17332d632c9a5dbf1a29b7ff36877970240906f849173bc8 +size 1168591 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png index 9e9d27413f237d216de5d08a3cce7d1466f74e30..1483c8ac78b36249e4c0bdb1f57386aadb752709 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e72879f09100832294a9f02fd8a60f7047ed9804e9cfe9451f39bc538fba5da -size 604463 +oid sha256:19a94c977f71890d8803d2e813a9e5945a04ff7ea4a7c4ebd129b822fc2789e7 +size 1079220 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png index ca09b3279563b4f4ad66e968f49cd2d09cf704ca..938078a60e10e40a1c4763d65bcd4e9f253fbb35 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eebf81826cdead1a05819e77836c368bb9c16179cdcf471cd71b4de4bdf878f2 -size 1467368 +oid sha256:494a7c1d21ee080edc9fcaad893e1d0fb9688b0d0ec24ebc526ea64debfe90f0 +size 1467575 diff --git 
a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png index 4e13f7574f055e1f245a905775d64d74fd16b178..5ea12389ed56decff328fd30f37e9b33a94cf44d 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0aeeba007db16b4854a918e6ae0731ad1db4943d9315ff49526a61f6c7b49ccb -size 1315338 +oid sha256:42f39e019a7f155eaffbcf680f6588b961cdb405427ab74b2dd61e451f5f7a87 +size 1733427 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png index 7cae32a76d101dd3454231d0608b3b28e4af9723..5738c724556a21d87ba9ada4f0cec306ee525e68 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dca75ac68b285ed3c787e601cdfeb9863ec66b5e3daf7b217915bd46c3d96b72 -size 1492448 +oid sha256:aeddf172b8ffebc39fafbd1bc56c277325c156c44b11df9b29f7ee39b3d81cdf +size 1344311 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png index fb27405d99953edb67dbd71e97f16dd43bdef66d..ad99c76c1532b55e9321dd602360757e265291ab 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9464c24741ed4d7e502558b80f1cdcf06e7a2c34df2a5104e17f07133145879f -size 651563 +oid sha256:252eaea7418c57568e38d01ee80e5ff1f5fa5677d90c314df011b7b946d24790 +size 1210731 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png index 788e3a516df39ee63b2632b2fd7b247c118977b9..809f81da98803e3e26eba5add827c10235a272f8 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd895650bb21d6c406271a6fc05f9cee764d330b10e6cbb0e5dd40240783f8ad -size 1832453 +oid sha256:7784524584d99a6db1b3839d9f1bdae60d3d15e39368b527c0bdd2fe86b99cb6 +size 1361340 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png index 31076f5b908c5e3905a7f4715ad572fb156312ea..2a726aa1ba0b080172102e3b26b3cadafe2456da 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d5201ef1ecb2149184ee33d734c74d31ee6cfb023cbd0f970e79971e89700d9 -size 654948 +oid sha256:a1ad3d22caf9cc0feabf54b7be8eab485286a4045c47d689342aee9bedba2e51 +size 1157139 diff --git 
a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png index 698d10d58a804aaf1272f6bf3073f1c0b2e169a4..27a0c884359b4ca80c4c9fba7b84eb2b89a50228 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51e11ec9c853c700e9c29c774eec0395720751177a70f7847c1a15b8cd231b46 -size 631608 +oid sha256:6496ed11c7fa8e624349b25aa0ea8ec610dc1978a401699d4b1f04ef4587da75 +size 1136178 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png index e1d5330fb476fda9a21eb64726eb4a7435b897cd..a6bce28c0a92fb940c74c45a32871d18e5aca97d 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c677b3bc5ae968cb6427e12b249b0a17674d3d317e66bfe6799474f2ca1acd41 -size 649764 +oid sha256:2fafd1a3a802cef7f5f0fc32893aceaa16b3359d0eac3dbdc230bb138d41c604 +size 617073 diff --git a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png index d7323dfd5e802b67cabe396000294b3714a15d2d..70b039f9c4b7ab7b17a05adda1e6ae446a384a56 100644 --- a/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png +++ b/images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:795adcd47980b917e6bbeb08f0ae9dd8c45a452864f3885346fa218caf64bc2b -size 349352 +oid sha256:7d2fcd02a990c3d5f16fb9e6c85e2ea19634ace774d90a70e82c98aeab3ac0ba +size 282264 diff --git a/images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png b/images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png index 56a267f25fd9cf9f8582ec254a372994eb4f82f5..92c266997e44d89ad30bab026ee9955ca850809f 100644 --- a/images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png +++ b/images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d003a50b4bf8249158be6a936f77763066cb9768c0d0749eedf8a2ee26d02db5 -size 1252440 +oid sha256:ef5a480d98482b37fffffda15b62a3885dfeeabd06760c7fc8c702c39cbf2133 +size 1261999 diff --git a/images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png b/images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png index 996b7b0ce3713740dae5b328c71f828b5cebeb46..92299eec879c31e45511c9c3651c2aa60329436e 100644 --- a/images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png +++ b/images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f99154b9d2c3340544cbbd7bd704811f6af6e77080ee9291ece349c4b6fe224c -size 948113 +oid sha256:a49317442a28dab327b033b26b417ba225d0a03e4a55b6e80c2cc641f509ad38 +size 1040183 diff --git 
a/images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png b/images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png index c06947bb8eba21563b88e897f6548a850d50cd0a..3847539996e641f594865156501e301a57fe46d3 100644 --- a/images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png +++ b/images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:003e1b2e51877be1208fb3b67fd97a16a1502fc5115a4cdc85cd7c99fc4c9a28 -size 933277 +oid sha256:70119b7c3c94145b18fa4151edf60ab553839746e33f03882e37abae10014289 +size 783077 diff --git a/images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png b/images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png index 53be213e3f3cdb60fd389e03622f633318cad457..d5fb5253061b77d8f3016f4ed97f062d3ed5e8ba 100644 --- a/images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png +++ b/images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f53035da1dd86a7173dbd7af978713785a463d6e4c0ffcb60cb94bfcca6025f4 -size 1042723 +oid sha256:dec1d6f0a0a49c42368b52594ac3076b832658667ce2b78e77be0466acccf263 +size 517795 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png index 2cefeae3095316d100658f95c2b0c3fb1156b532..7ccbec50424a3947dfd21c67359667ba73fbd31d 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5ca40a7086f309d7be597bfb9c494abf1a8467726af35e5db8d30c1c48b9dd8 -size 1262004 +oid sha256:028cca9578cda243421db73b54dbceda5fafbd802494db935165da54642987dc +size 249546 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png index 54a629916420a67987a562e1a9ea9463aaf4ff34..b1bc004c3865f54ea58160179439316302c1e2ed 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53ef5750bee657d637163112eadcc78ed4583fd0ab00a5d763fae18389509db4 -size 1060204 +oid sha256:a2e623e413a009f1dfeef12a3844c70008c00d8987755278d16fd45e1a3291a3 +size 1107550 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png index 09c2ffc9dd873b1887b53e581b3773033ad1f33d..175d8b873fedd0d7ac52fcb5fb93be01174f33db 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8451e64e0e903d0c7373378b501a405328cff07f7e6afb0ba3b2f80e1eebd244 -size 322195 +oid sha256:656fc3737df02b138ccfc1030f10b598cc9bf20132fb9d645452cabbaa279232 +size 431089 diff --git 
a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png index 63c0b6259aa2fb42c14f8e8b41b0f50bf2d7d928..039a9abc0650c7192237b0697cd4b17aecda8d5b 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d3d54a40fbb7617f097d68023599f8d5f6e1ca2fd81c36d4b059f09e9c87422 -size 1916963 +oid sha256:ed4f34098a13fc539cdd73c6c45b1fd1ec901220cdd4c2b666c90aa8cb46b44c +size 2000498 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png index 03cbce6158f47f5a9a21b0ddb47b04cd4f6fd550..5e77e7f43f057d013428ae073364a00f466b2aa2 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29541fbaa518b67045f946cac3249bc6ef96858dd8ab4165a9fc062e54a35f29 -size 977074 +oid sha256:4e021cd8b07f1199f51c415e36311441da5f46002520450292789ae09927a0d4 +size 796788 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png index b083f29f2410eea237ef37c835dd678db878b8a9..5e6e1457ed535646ba6c8410a87a5bc086ee9073 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6511a0316b5684e25ab3c32624d26a720c9b7853d090f50fe9a8794f6dc3779c -size 1919984 +oid sha256:aabcea1002776cb878137a8e17c54ec4d58f1aa52e9caca88df103ee23ab15fe +size 1732150 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png index 53b49e14df1c6de85c13414b27e06c6ce7299ac3..8b774f59fdbdfa754d772954c0e9e694672b7058 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:179a967db60ea3ded8b3d7e3aaa4ba028e8f3da520cb9a5455c661733c0fa55d -size 1741837 +oid sha256:087289e2bd79b7bffc4c278b920c3b34db9203df9153e3e526290bd06f856ff0 +size 1323182 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png index af9eb5099004e8469abf983a0d4f2d5ccd0f82c1..88ad7d1bacef32d145775b18b7f948ca627d41e3 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9fec80334e8b3d06b0dfb51069da71f3e5388709ec52faab21b081dc3c84b2a -size 1759163 +oid sha256:854e9dab7fd9c610c2b5c3a914de59ca1572c5c398be08903c3cabe0a80c490b +size 1163870 diff --git 
a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png index caf5a7b9187b1fed612bdce72b55278c5183cce8..063ccb785e3e5e8a8e8e02446165fa9a5aba4238 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38b5d9d276078a6a4cddc852a3ac9f358580e302266ae3c0306178c418e78cdc -size 1031277 +oid sha256:bfd91703e43fc2e72dd3fb1cdaec48b050a29592d9e5b00142e96fb3d1988f7f +size 667458 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png index 1c7f9d33a23ca3135025165dbaae26996ea4a076..59798b5cb6f8283080f9d5b38dad482a0370346d 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2b9854be3d651b1ee5a59785d5ede2253bc80d14e00c456c6586ac6f8999d5d -size 1837408 +oid sha256:cc7f96abf750efefe9e865e187451c5499fbc509e91c58176cb1c092ded886c3 +size 1267503 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png index cc747d38e40088f4192cc28b6c86f648cd0f414e..eef90bdca3d3af2f8016ccac61d58c9b0194bafa 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fafd1c09c56be61bc47357a21752cd1d4b42f1e0426aa8f5a1a7b70af96dcd0b -size 1801731 +oid sha256:5fb8c6b1c899db970d11531272ed8a252d14b3c18f76f2f453cd8d327a376074 +size 810415 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png index 6e8e33c7de1bed228da0246eb16024ca91269cfb..c2443b16cf6d9ef3f5bdb8be649dc996e9ee0a17 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4d3f3540c33997db61a2888d57f09ac451b6d06f1d6cc97347fb0f0ac13c912 -size 1124862 +oid sha256:a91634efec4dcaf90ce7e3978fcc4a52d23b288954f07b3508cf3be1585237ce +size 1341100 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png index c1b867c9a1e8930bcef49675219859b4a8d9c865..210b3d3acdbcc47ade13b88d5cb7465fa04e908c 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c0fdf85874d5daacb9b2484fa66df7a7718451923a20368904787a4b179049a -size 1890348 +oid sha256:6f395cf8bf950e1f287ebd3873093f851f4e8b5d9a2f1b2466fdd0a9f13a3a27 +size 1861803 diff --git 
a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png index 4eaf531aa3619461f9775a8506b035125bdbd411..efa2aea254add845664233663bc1ff46e415f6e4 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d3601ebfa1c4a9e052855c330ae7d1075fa726e6e5673971ae6a8ff13f1eec5 -size 592392 +oid sha256:996e72fdb8b65002e1808db7dbe148e6e16e664abb68e24366c02cb0f52d54ba +size 452430 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png index ceee3592df3e2a76f0f5f4ef3096238869514be2..0ae1a6c76c09c4f632f8eaaa9ec2314a77ec7981 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51782ec10d5e0adb14b5c6c3f5765a67ea023e7254e6128965f22599906a2863 -size 1017091 +oid sha256:8fab3e5dc6296022ffd1a8e0890b76b07d4ca59e4ac13f73424b40b7ff1cd3a1 +size 935888 diff --git a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png index da0cabe6e9ba05fe402a0264abd2bf5f4ec368c5..e2ef87f2cecd04fbef612375205bbed346edde6e 100644 --- a/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png +++ b/images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96a99df9fb0be7a15a3eb56850b3cb977de7ef0507f6341499d62e847516fd44 -size 1891588 +oid sha256:cfa2ed67c3be1a723a7905704303d9daf2ca956b6e8b8e0e3d5edb6cb8a71398 +size 1947521 diff --git a/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png b/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png index 4265ee284f669dd206e4202d1fdf2064c44d1807..9340edac69def8fd11113dc8919d4af4c9463623 100644 --- a/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png +++ b/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7cdc9364dfb42af5b7699f8d15bd58200762e63ee67be87a49568b49d58621db -size 1362479 +oid sha256:d93e8fc192f07a6ed7e1ad0356e6c38da35f25742856a6d73937cfb2de50c574 +size 1260891 diff --git a/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png b/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png index 9e21e30e557267480e33cffa0b81f95c79be3239..e56878ffcd6e1cf844c30cc3d5a99f71097f6df4 100644 --- a/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png +++ b/images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9337c47ac147621d0a616d326b52b48631c5cde60a73a8b64b0f3a0ee581481 -size 1192217 +oid sha256:1d6edeaf72cd41e3f0986a8f2221c5def113aef5e0a0bbdd93c4037e20bbfd66 +size 1314066 diff --git 
a/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png b/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png index 071340dbe102ca0a6cdcd322581b47e72c746f4b..175b5db2e091677ef18892454bb6ae5fb477ca83 100644 --- a/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png +++ b/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e71b2f61f3f19a3c544ce36768026cabca24149ea973232f68a552b9303eae4d -size 2197765 +oid sha256:1568d5435fc6b21307f440d6b7f9ffa1cbcc097fc0d814080140da1079582f7f +size 605861 diff --git a/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png b/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png index 5ce639e4351523aba562ca786cfa02b7720bb345..6ccab95df54d8b8142d6cd40a51b9a09651c9bc1 100644 --- a/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png +++ b/images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ccdd10bdf38ea1fee1ed7eab63228a1811e78ea8f6ed1166214b01aa552ec75 -size 1249492 +oid sha256:f5651178ca4c990532921faef1955abdb0fbb17511631cd2e26f2a5d1109b2bd +size 1436063 diff --git a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png index c94cd33a4ab5d99c08dbc3ffb2e5914c78acf091..317a2bdb04a423bb910eae7a82b1d46ab775bb56 100644 --- a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png +++ b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d00d03d1f5f76432006d76e1340e77e5493eeaabb09867e891e4e84eb79ed841 -size 1339824 +oid sha256:32c472caf0875116bb14c0f24f3e66aea3685b7741d637d09cd982669794af50 +size 843840 diff --git a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png index 74a7b29a13d542c1ba3f598b30895a8fd507a15a..c762bfd1cfc9d156a027b553911d111b00397383 100644 --- a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png +++ b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a87e9609ebc4bf187086dc19158977d874b5c80a93917aa8792ab73b11482af0 -size 1775346 +oid sha256:a7370dcebf658eb84f49313d05ef3e457b69deb1a16139865bca9c355db191b5 +size 1247146 diff --git a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png index 10509770cd6977edec2c045776ca7211177c0283..44f900a74ac64df5bc2c34b6e21a0d09fc5d7cde 100644 --- a/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png +++ b/images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e40acf6895712334b1d8e1f8689b55a34a505e840b637733bc51814b35ae2107 -size 1301992 +oid sha256:b155a911a948151e8e79b2a8ad933cd33a21339efb7e4cf2eba66af12006ea94 +size 1512072 diff --git 
a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png index 12103fe39aadaf80e29fa792b6ad515eee7e7a6b..422569a4c57f1eb18a1c97f20816389777a6a267 100644 --- a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png +++ b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ed8d6c1ed4cff7388427462eda49f47599c0f98512012a2a9792f4a38c7d60b -size 675335 +oid sha256:6a84d9a5d0898aa5ad0da3cef9827cb5d8c5b74851cf7baf06008da30a3a92e0 +size 593145 diff --git a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png index b27d60a923851e483cf5159886a159f4999551ba..c09a485c7298a6ca92e7a040f170b6add9c739f8 100644 --- a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png +++ b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d059fda69afd153a00d5b8eea722e5035d0bd23d0070655db1a5770b2ca1915 -size 859826 +oid sha256:6e316c4b9589160f9e3dcf41e24c91ac2ddcebbc5069d92a3584e896b94bde7d +size 849954 diff --git a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png index 75dda5876f35e3e147065f3f81fbc22e3a24858d..878dfe2b2c1e6dac83d0ecebe8ca1b6c2787387b 100644 --- a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png +++ b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba44936adedd1d43454f098abac5e4591aaebedaf178c2ff81b70f4aede26952 -size 750094 +oid sha256:0e162f95841c19e356c4af8c563ea4fc907dd099a0b7ae57f6e841d0cb1f43ef +size 843708 diff --git a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png index 554a8b037c76ac8a62f769c13ceb99b6ca3d1902..b4519f8cdcf391d063dda2071943eceec617181a 100644 --- a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png +++ b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80fd87c4356af919c9ecd1c1d1e7b9f28bf2222b3e66eab317558d3159fa996e -size 1327697 +oid sha256:865a88dc6793e01d77cc7a3266c5f001112d51d312e05fa81aa2ee183c68d525 +size 1164141 diff --git a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png index 573bdc69e706392ef251280d78ba5db6222c6e09..da5a3208f637fd97c952ae20678ee22e58bc8376 100644 --- a/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png +++ b/images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0fc9f3a0edf93fe0948a867e0256c4c99dd98e9a6dd0fc69a3bcee3ef9d5eb1 -size 633349 +oid sha256:303497b61ecffba5f72f2468edb0a550654414ced7de3bede1509fa2f62d18dc +size 687573 diff --git 
a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png index eb2351ad4cb5819957d60b352f3a6d5166d6527c..db1cc241195e2ae40e7f3901d76818a6ce84750e 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:216dc6f80cbe1423c5ab9ca91a0d7d02212aad0dd76b24e3cb51f60294cf1fe9 -size 271670 +oid sha256:fc7769b1dab05c115c2164970dc05a13833e32e1da4ba3d0ac19c7d4cc4c277e +size 423580 diff --git a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png index 357c6d60246ad88fdd44cdcabfc2bfb8d74d8f92..bca5f073fe3f0e2df8cba2afdb198df0b02e7067 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1f2041c0ed10d2ceba25dd77e0b8fa10bdf076735b554780efd0b8da5f01c90 -size 278200 +oid sha256:7238c97ee48ab0ac04cb4c9255d5f873937309162d7e496c8b1e1a525da8882f +size 441027 diff --git a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png index 366ea232c8cb855b0a660a43fd506d8374a0e43d..c1e4a86629a6b48da1f269063cbf1c2e0d679d54 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cac6fc43e017e6417633e69b33c6df6445ee67ec6788e02f3fdabf84868c8a2 -size 317780 +oid sha256:b730cfc5fa5bf9cfd715ba1dec31dfa49344bae11049e54300624d95581aa2ab +size 389139 diff --git a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png index fee2d7607ebfd253fcd1d5dbedcb8b5ecb9cee80..a50002e26456542284bb6192f431f5747802b1d8 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec1664c745af5e5ba5d256d776bf49dd705458961bb8453ddbaf980d16910bb7 -size 284583 +oid sha256:12283a37e0be351e49c9f906a3e8866922d9cdd4577b0844031bf28ca071ef6c +size 217569 diff --git a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png index adb282bd261118adda3f8fdb1e2622c35c274c4c..da979f131d0efd633ec8a75333a754bdebfc3323 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bac95d6833e9900c5fd1e469a37958b06ff9bdf462b579dca4d24325c84352f4 -size 267981 +oid sha256:ff2f7bc56de5d353037e2e98d2aec25bc05addc724715ecc78fa8a391394d6af +size 310172 diff --git 
a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png index 8b17bad45856683fd94c8951abd6eef5c37c2cc6..ff04d87ef66b321e70bb2d3ac9a709cd1403d9ad 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf89521d9729dc361f4bc4fe70f459b57e02a7d43975b576b388438a81e36a32 -size 420637 +oid sha256:76eab81f2f7c6ed6751da9f406fa37bc0af0882cca47b4ccf7a7ab55397836af +size 404768 diff --git a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png index 31004e97671cc3ea978cd00cd4040a017d3a644d..a6a828ff5bcebeef1c155edfeabe50027be92f51 100644 --- a/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png +++ b/images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0e9f743cf2dd4b605246202267fe95b883d1c88f17c419d9cfbe24e13b883a2 -size 927831 +oid sha256:c37bf17239697e7c43085f9d4a5b34f630e8308d9e9b856e31bde352df65f497 +size 866988 diff --git a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png index 48d3e62eab1eeba7015fbea54ce11109fa5d16e3..6c29f00dc213da903d978e2dd2cd140302399e8c 100644 --- a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png +++ b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:711464c0ed47c98c74a4497e404230d8d40d2ae37108c79fe716dda211b09b68 -size 672407 +oid sha256:e2484f4abaeef0e266b8d15ef9a886b2d46e73d5f284aab462be212d61c8a1bc +size 824932 diff --git a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png index ee5f2ead127107596a77e571b4f0a1a0fa34b78a..20ff473469c96fec68c3dd7e501b6ad29da81f57 100644 --- a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png +++ b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ab64813131dee95e9117e3dd3bb9e28cb111ad3a7f332f7a22060109fc0995a -size 1783064 +oid sha256:d83649b1151f664b0dd3dbb40e0a775d3679a86eec63d62e45e9eaae3b784960 +size 1546338 diff --git a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png index 2459656f874279ad903cc3bf3ad3c0903794ea7e..4b8e40f7a09741161aba634d823968e5e02cde8a 100644 --- a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png +++ b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3ebf9bcf5fa5400c2ccfee53109f1700d1d358d9f226cad253af4d82c8b6fef -size 537357 +oid sha256:8a2a9871cb6623f881e59f32cdd9f20fc0a2f20358ccc96a318347c7cc5f962a +size 489148 diff --git 
a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png index c8630d83659d99eece2539b6cd940a807e302e61..0568e8a9132dbfd6e742f81b2a96fa7d92e9e5fc 100644 --- a/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png +++ b/images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e980f9a26734a9db89f057edfc481e445d8347b08c853d2d236a4fedf028b28 -size 670402 +oid sha256:9b51ede8d49349a4669c43d9b9f6336340d8d228e5df0e0c66980a465c58f3c3 +size 936468 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png index f6fd40ef8aa7547d1e21253e59d679b1854c30ad..b96180ad82aa58658cc944443c8d86b343671a46 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:044699169e9d1335e6cc43d7ac1cba55e87084d6d0ba50ab6e04cb7b3e07fc10 -size 319050 +oid sha256:0197d158b7c224a31841242ef9e27095e52a3d7be513d8636248daccf79200b9 +size 477991 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png index b3ba9e4575be37f50474b253ebbd6f83fc5099d1..d7e990ef95804534df7171c5e8c21b66840cfc41 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e91d47de13617440aad1c439c6a9bcd677afeefa22ddd6233b041beb572de93 -size 359908 +oid sha256:3fac34625a5af3319516133535513bd5610fdbf57345cf6133bedfdb9d97bce7 +size 581071 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png index 9afc4aef7bdde28b5890d7a45db978725671825c..eadcaf48ccce02b5f42cc585d3b9e30a75add3e3 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56d94880775a485344299067a8c89f4ae52813965e20c8b4d120e91e5dd88365 -size 990518 +oid sha256:c4413f6e7e69ea3534b6fde491edb77c6527a22de66b5f5147c682586680a8eb +size 1017477 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png index d25f0c95e2eb82052c9ac946e8a7abeac2f710bf..732b6e6fe3acbab0d35a18534562b2f1a512df15 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03b5a66f6cfd9ed7cb976434e61c3d06e4149e1bc235e517ae2afa07f4fdffea -size 319375 +oid sha256:899ea5d12e8686d49d9c8fe7e2e6a7581a05bd4be013d380c76d2397c741a616 +size 425626 diff --git 
a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png index 581c54ad3df55c152b7fdf60bd67d753bff22fb9..ef81f6360d3d9617632e6c5ba16e7abb70fe00ea 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32e19b0acda65cbb3041a805f873ccfc353926ed925ee8394e156740a4ca00fb -size 942672 +oid sha256:e58871448f66e4c8f0b46d837e9349ba59aaae04ac8d77565b320bcd52a7391f +size 1017633 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png index 90ad1983ec06b80a96ce90b976cb4af0964cb0b5..ca3c7992de6b72ebf077d4e534770d89e4f08965 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeffff0cdf55458fdf58407726a064a31d86567704185bfeae1fd7f8e2b3ad80 -size 320730 +oid sha256:906478a20099d63f2b1269233e8d535e05461a422832cfd948dc940120832c20 +size 442376 diff --git a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png index 575fe430bcad745b899ee4be1326b52aa04fa59b..fd53c24e79dff8f9992a12edce8863463c20f881 100644 --- a/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png +++ b/images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3312c5b7b8483809df07c0e4a906564b10c1d40c253164cdd9015cf25e56e267 -size 321214 +oid sha256:9133a3dbeced5dc6666b8eabafa2543fe32d6b25d8c5bc8c2d73c6f49ef1d1bd +size 324078 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png index 4c7e9997f81118ab605ae0040df12db6c78a6d26..4cb119c400d5604445058f39576b908aa4e9d8ce 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b74fbbee8a33fe97c9adb3caa23b469a3adfbbd0d6057eb6db9c8f532a5c6b2e -size 574485 +oid sha256:621ebbe0627369c8a124a5456c7b8df273266980604b60e7aab167aab3866a04 +size 453159 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png index a5cc1730c6394bb5003eb97208ad7d72480510a5..2d79b3e27ba9a4f5c05d3672adeda6c072a53bb0 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98741fdc723ad09bdd9c090c8f95576a4eabacf207a2555948c446d741750092 -size 440937 +oid sha256:18bd36cd4a95ad04963d16bc0b405caad51df4f0b734eee7772a5f679289cefc +size 497748 diff --git 
a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png index 3c511336ba9a7c0eda8f3552d75cb61903c6f38a..7d6471c26d16c78bcc921f55d1b38fc7e9772cba 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37c316c4c44a7ab906fb7817d45700dce2ef92a4894c3371185b325a36d8d14b -size 902787 +oid sha256:29b8d5651c1e107294d354a1009b911ed79db16df29445c844ea7340bd9ed5ca +size 968518 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png index d259e3c8390050252ec32e15b5c09cb713b68f1b..9fbaea370e99e824042d2ba57433183fc132b7e9 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4b076266377d098f24004f6ff14b4585c5c975727d852683fd22f54dc8be16d -size 1041056 +oid sha256:85622ad7e691ceae0701ffbcc8a54870b62fbf5d26ca9601e4da8b548f9b534d +size 1172497 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png index ca7f3988ee753a334ee031f29e825dc084e02a9d..70dc89a34c7985084558e8ff73ce8f3d821487e0 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f25b94481c34c445af481e5bb37af43116cfaf7855af1ec6962a07da7bf9131 -size 422656 +oid sha256:5e1e2b4cae5c8da475b845051ea40e2fa3189b6519f55f8fcc6b64ef06b922ed +size 495987 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png index e0458c2b19779966c8578870c532bb0bfd51c0a8..fa748d7dbc97bb50b455e5a656721a4f88d542d6 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60e98a50ef40b3e30cfa4c08db04bb8e402ad54be212e8586ae148db17ef329a -size 1139381 +oid sha256:98319c5b38882f9dab69907ea258b66f525d452e32453e4facbb2334e7cbfb52 +size 1257962 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png index c3741898883135748ad22a6eb3fcb491f8844df6..7632d9ae89cccfd27262a4d802f1fda694c1a48a 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e93793acba81e219d9c47fd2d12370d4cd745cfa5c3d42380bb87aa9b14a6e2 -size 630086 +oid sha256:03f0ebf1aa64fab16f3d55ccacca62b18e4c70e7c2af98c905569bb808fdf88d +size 832070 diff --git 
a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png index acbb8c12465a3fdead7315c8f10cee2f821f46c3..ac76b88a4e5f1d07398838924ad08230a874c6b1 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4bad547f766324b9706c7910c93d909e4bdac24f8b54aa94ed5eb60bbdc8fed -size 914328 +oid sha256:c1e034e51bfdc400609359e408f52f55136c06223ccc6a581056dc6a0e65006f +size 966631 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png index 10f447a13bee84cc34849f44eb180c319c8c08d6..00475ffce887c7240e87a372ed2e47deca2bd59c 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:278a52da305c56241977dedc1b2c711d0a60e91457ac07cf754a230a5936cc3e -size 1087571 +oid sha256:3ae0f9b9ef67c689d078cf067f56f4bfa63402ba4958f5cb8d14e5a814efc447 +size 1156501 diff --git a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png index d8e9696943129712882e4f40405632b0c2afc98e..5383c2e4e8657901cceccb56f475238afad0fffb 100644 --- a/images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png +++ b/images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6399be1ca865850163f1a0c3f6fd4f17976aace33c7461f38603c34d3d188f5a -size 1576329 +oid sha256:1821d44589a31096c0926a4c0f4e67a71ef2f96aea9fcefd67316ac958ce3824 +size 1563430 diff --git a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png index daeed469934d7fb509a971abbac160b214687242..4c034fa574db6b0f5dfa5a17da885cda2ceedc0f 100644 --- a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png +++ b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3c9b19726b5d3294a79c7ab839ddd0b58e767828701309a5a1a9664df4c0f56 -size 161195 +oid sha256:7edb3858924894a8f1ea8d8291ecf377689298e5530b5927e0435ac14781a520 +size 162986 diff --git a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png index 2b86b7d877aa2f60b795aedfeb2b39a4e374e215..702c08be0073c2399b8b29b1d0b88a89495200df 100644 --- a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png +++ b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1054a00051e7a2f0ac5bf402426a6815b5288bbfea801c4228c627841bc4adee -size 1623326 +oid sha256:f16bd51371987b9b1bb8e334cf46b632d867f5a2a27843a1b507150ecccded13 +size 2278370 diff --git 
a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png index 68703a4e50b2457baa06ab44df08cd4f0ae0b000..fda251f55025853fb157d98839f104f93687201d 100644 --- a/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png +++ b/images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4e0ca261f551f7be88d41474934ee313383a04b3db458ff390ada2644a25f8f -size 162488 +oid sha256:a13934d7be76fc5cc2959bfd3b5412b4fd6d56ac3adde00155919258751616c3 +size 195295 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png index 27d112264fa4c0d068df2cede84ba74b05a6408b..12884d18200b1fb5c50d290053947a717c60e8f8 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d184fe81b2db214f89f34554ed6767b074fdb4fbb98934945981206063b867e0 -size 774721 +oid sha256:b1de43fe47acd339cfb78eb8d4b50b97962b922c30e4bccf357223a679eb9bdb +size 1440072 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png index aaee864b6c47c22476a4a4dea4a440f028251267..d9926af5398b0b668fb639a167983aaf6dd0fb0f 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01c5ef76190c23fed79a42d1fa769e57413242de7ba0eb44d0a0e964c7474c3b -size 1217686 +oid sha256:7ef7c06e6353feff2be015c26cde4d70d434bc7ff524000f946cb5072747fc57 +size 1585965 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png index f16f4951b1d974e29dbb0cd4072449be8ebe6e6e..783bc8cfa0e9ce4164d31a6ce2e7bcdea82ae7ef 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da7585999d979e88bbce9f1873a0e4d1a3460aa353a4aac2295455b5e6cec2f1 -size 1203407 +oid sha256:c2f7449d87695e400802146661d6d9d3361b77365f8a1b98f799929d176c68c0 +size 1514623 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png index 25ba36743a1ccd0e5829a415fdf8344b5dcf1349..97b5c84457e75fb3b4f151a83dd87ec179b58e98 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2df60060f9cab990f783326c18d763ed6740fd58f187a9a712770a608b180a5d -size 1029384 +oid sha256:289d09b94d387949ff2caa98fd2641388a41fc1c44e8527f9713a01035f2bcd6 +size 1748645 diff --git 
a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png index ee87e005eb98e1e3eb77ca0ef05a15910cb6b16e..f11175abfa1548b75715195fdc9bc0e88b9c94bc 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07d669c0d81eda1d11ad20a99fda2c6fb22a471652368a91b115b1bee8e0f6c8 -size 1162860 +oid sha256:c7540e86b71bac771a65471c527336379fed720f5ac13e899ec71b1257d9fa7e +size 1531705 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png index e5e3227fc7d9a5e65fcf25017f4ea976ac911103..15c6178fc4c11c8c20ce1163fca2c6c451119270 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55bb80b1d993fb8ac0526fa2794b44c3788f79e49010ef9e7b1bbdbede5f3d26 -size 1247684 +oid sha256:7e68bc7f2290e2f2a7d9db816c287266c6b402a36ab425489bce6b389463596c +size 1444120 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png index cd108d662a9e47ec07735a113a9271c98301e941..950862b8483c07dcd70403758714e987f3ef1089 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc6bac1871620c803c6c54014d26c6a8bcc633b5f8d64b981d01155e7fb20eb2 -size 1212451 +oid sha256:f12c090babb07a0e2d96f062cccf76747d583c16e49f5cfbf7db0f4504b6c90d +size 1264152 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png index 69f33ad00f82ab648acd26114c316da5ce364094..55221344a4303cf946016dfce849fb4b642fbd13 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d0bf6fafc032b6e115a24df5e570978946bd8c58016acae6dd5976dd7698ce9 -size 1230400 +oid sha256:f81e6c5ebd88ea1ae6a353b4142be175059c55e74f650e22e3b0b4b5b49dbc63 +size 1876484 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png index 06adeb601adc3eba17be390b6008f77d2d936d73..7ba14e420a2ec32218d98a3287c08fa7120b55bd 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b598a2d0a23959c2512267adc6cb669743875cfaa853ee6324984218f7add1dd -size 1177374 +oid sha256:438cbaee5a9cf4191a8650a3bfe7d3aa6b07fefd51bfdc51383fd2db787ff28f +size 1565230 diff --git 
a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png index ef533394458044bf379d17a3dbd5abc178754aac..b04076620bb21ec8018b3f5d3c61bf82ae988e8f 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d31e5f21dd6698b4cbca1a2956bcf3e4539e9b8aa71dfa6b4d23a44b626d8255 -size 1140684 +oid sha256:3bb07c0d8615a501163324db1d45a9dd7317ff20548b37a6993dd9cd5ad5a826 +size 1309819 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png index 67b47ad2c943f23f38abe01e18accd8c438e39b5..cee3b11b1ea9dda0aa99e132319155440a835bc7 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bbbc0965cd091290452f171e0ed23acd0db326e8a94bdf4e9f931378ec5a46d -size 1202082 +oid sha256:e7b3fb2fb9757f77f10830ddba7a7fd4ff84199cf7dbc5227cfa6e72ac628d63 +size 929505 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png index e5514e7a3b11e6e42fba7789cded83ebeb213042..4b14bf6f8de2011c3eb801f55dd6b896072e3731 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:949d6db0a5e5f7c460f1dc919ddf6423bd269f7e86325141e12a5465d6f4c93a -size 1202733 +oid sha256:8cf3798f2e22d53ff23b510b1a0b4ee63b92fa7bf1cbb86076428f136704a4d2 +size 1806751 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png index 6dbb942d8b58b167791d77a9bad37067c31b17ee..eba459f4585216bc0c66fed81153db546cf47535 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b791d4d5ded9950eee11d3cf2f2b313e55c9df759f60e9df08da2042ffd11d92 -size 1219292 +oid sha256:62c417ab82379cfc35b27ec2901aac3a74260977b3a9c7aca36cdd54a6842a1c +size 1226407 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png index 1502e92096994779f2290879baad4d104eb42da7..256184e65a0144a8b9c3884c08d4374fc479b9e5 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4df626aff784fdbde6e4600d152283bcb98c4da93d991c8c425e97413af5489e -size 1025919 +oid sha256:47286b3f672a553a3a1a06587b76ab310430b59372b732811de13502197481da +size 1063199 diff --git 
a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png index 82c3358139d582fb4944247847d282ea80fda7c1..eb10ea74dc07fbab1c6a0a93558a4188e0473154 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68eb3063fef9f50f10fe72602c836ede8ee619e2ed61c703f6f8dea2b7e2872a -size 1211849 +oid sha256:554d6136b13b407e7291e28e98121fcceaf316ec05a8de7105613d1b02d3cfe5 +size 1281242 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png index d108f3002a5079e22bfb4198b9dd465cd021c80d..0e1baa33edfc99220c995287e6be5d975c737b7d 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d3c962448f3c963b0a8fd2b1daeb107be3e2a421a5593497840607a96389128 -size 1081749 +oid sha256:daf704fc08764a6bf20719552ad3688e908dbf657973e4f227d16156e8daa21a +size 1122351 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png index c49fb1b9dcb39f53c27b5134b91fd3e40acae838..924c8c21ccb3d2fb34a203e70279069a8faf776c 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca36ada85d350e592048b35ebc6fa4e981cffde87a99bc816cd3e428c5da75f0 -size 2422392 +oid sha256:84aba3ad9fa6250e537876fdd5352c3854d45099613962233e80d6214a99f192 +size 1380647 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png index 7c23a22541a4c3df6804c4a0164959f98a4428e3..6842f45d23238db4cf63f4ba97285aee874b0cd3 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da04f3ee93f6457f314420a07d6ee26fde311132e3c5173628c89711ead8ad62 -size 1184297 +oid sha256:851e1f180636ecfbef341893389a7962baa674e2310f81395302cda91d321c4f +size 1418216 diff --git a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png index a175c4021c9852980830ee62f6db96692d1e739d..ad4c9d87ecffa05659109e62859066e16b8715ce 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa8620f5d1c2b904ea4035abab9882165620aac82fe1e5cab3bbb2b36132816d -size 1260575 +oid sha256:fbe9e2af9cef45f4ac4c0ae12cd38421921e24babca7698a5aaa703b61ab6030 +size 1408425 diff --git 
a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png index d22533144cefcd1204abba08e67fd6907514f547..fb14eea4fdcbb5dc2b37453e081fa795c45d1176 100644 --- a/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png +++ b/images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b47d2191ce6ff6f4a774e9a5ae19cf938fb585e16aa4c8e4badbc4bd3f3b6d77 -size 1124898 +oid sha256:8c06733b982208de8edaf8f6c4a5083e35ba92c023d5eae2ec051cb8623bcf65 +size 1565222 diff --git a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png index 2872665bb9100789a763a2aba6ce9ddba1c99a8d..40386f28c1f874fdb6e2ca76a9df2d8ade4e7f1c 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d73babc6ae86e2ebe26e4740d2f15c90f6166914f233007fbea6131bf29dd45 -size 721809 +oid sha256:596f03799149bc36d5203d15967d530e49572092dc939b68cae0a47962057e7d +size 480352 diff --git a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png index 1f838eb647dcd00f4dcef9205a55b965df2858cc..1b4649adce3e1a817ad9056f7724d9fa9b82b58a 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:917d17a0e8cea49000539fac04869d1c488ed46451d2dbfb3c0b62c36673aee6 -size 727289 +oid sha256:ae617367252ed1ab663a00d1ca6be0765b03380b5deac6c95a27b487e6b1e694 +size 732014 diff --git a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png index a8493c896d177aff8635fc52a27d476aacb6a6ce..fa244e81a28291ab1f5a8832655b562c32fcd0cd 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb163d61a644fea486d73dd916337e1229a209698ec39f67bc77e8860b056c43 -size 789731 +oid sha256:4e88a110208b778c408e6b376cce785f41d2adbc937f3f25cd94d8a6e3d47032 +size 608550 diff --git a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png index b15e0b9509162715f3a674c2ad038a2715be85d9..9f7dbf7b29af670e9712c5f90ab74eb4fd79b29d 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72e9482c5f3671606515b3341e1a8ec5bb00f8919fb757ce9dbdab63f90a3a5d -size 1019425 +oid sha256:a5fcc8b68181ab38341b31807774a0c67232a009c80f344d018d89e14cd1d481 +size 1379610 diff --git 
a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png index 33f27934c68d60f93d4d578c62c6b835ee221bd7..488b350f21d3aa39694b859f2abe3cf973620f4c 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65ef983097f114ed9ba1b53d2a759a18f9af78dd8976e6a00a46bb851d016455 -size 726258 +oid sha256:1353e7a24fea64db7ad43b4c9aee1b84a30091ecad0f523d50a7387ec7438195 +size 479284 diff --git a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png index 9ab166532e2fc62466a33a1791dfe203a3015ab4..bb7ad1736616294c9a7e0baab70f91aaaa5cefa2 100644 --- a/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png +++ b/images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4236319e3c8bf6339cd222e8ef95f30b8af05b1ff09473239a19c53d4699a7a -size 727522 +oid sha256:288cecd30719c3c52c4ae1e3e120534f0203bcd3276f6f48e215926d3f090153 +size 567070 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png index 8cea3c981337d312918bd1c9979a72a692e5ae1d..bb8844e0483764d136eb709db06602b14011158e 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ae71a90cb861754eaeb5adb0639d187741a43b9947ea7a31773cf6721fc52ef -size 302448 +oid sha256:9ce25ddaf63f42ae508635ce7aebc5e7d0e81c5e4024c46e2cc738c0255754de +size 496098 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png index 92b0c021a80482ec58b1f21bf8ef3c2cb0095095..e1a338a1ac36358af961e696a756c82d22c93eb0 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16a0fc1981fdaf07843dec11b7b08f23a7a28fd5654a0622514cfe2065e64072 -size 330247 +oid sha256:71371eaf00aad1235c34802c12265aa35ed8a61102f54416476ead56d0f1c719 +size 390807 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png index 7b55f1a4362c60add92a53ff4a0f0ee881e15a7b..06060e6f086fb7f669c3c05914e9b74ae3b0714a 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:911f45a9a8691832f659b4ec48cd8962d6ffea116ce2131dc76dd5e368136534 -size 1187323 +oid sha256:d8334508db6c21dd08039c2dab9a5ad2bd659a3b2b0b65183eb664bca4e1adc2 +size 798800 diff --git 
a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png index 56e876bf1849086a6a08bc36a134c9f317384859..37ac08dfd1c2059ea7e9437894ddeb4b9e008409 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fb988c6d8ae799325093d5924f1c3134f047c417ee676ec4841dc512943afc7 -size 302251 +oid sha256:6053d4157572a5e03b9aae937236d65dfe54edccf91447d628ca678e65cf0ce6 +size 476307 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png index 880a52e0937b10c1df61d7d94c9dde1c468fc513..2f774273934955d3513911748fb28dc7aa6c97c7 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bba934d132b1be090fea998bdc3e52863364d3a007b9345568fc0a0a0e15ecb1 -size 322893 +oid sha256:f25273318275044983b7d2a187089dc58f32e6c96b03df8c900b04ad7b675906 +size 223841 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png index da8013bc237fdf0a9c9530e7d526b658856162be..414f9b41c23a00c4976756c4d89a20955a0aeb67 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f24b8de9ccd5f846cddb5457c5554242dfb9d9c19589c70e4cd8515f38ad8ac -size 647365 +oid sha256:5c12242c916baca4f0a1c273fd26b22312bd8e60d0dffcfdb4048a69acc5d169 +size 588853 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png index 6eb7779ba38ae82f9ad7da7d138d323c00e62639..6f5af53593256f3fe12ea03396662c0d97bd04df 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f8d95bab56b414c6c1cf0d68ff2e8766e664d7d2e0c460a01145adaafc12412 -size 301710 +oid sha256:2dc595e5a5f2222812c68ed3ec8a0d12c41d3301af08b0ab2aa663937d672cf1 +size 514087 diff --git a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png index 245f987c3b3b051d76350c2edf91b84c70cc60a8..83dc1f1247cf0771a3906f98af0abc0c83af153c 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c602903c990562bf222f587f16d22a7e8b053856a91847a75296e9083ce6cd0 -size 609932 +oid sha256:e7fdac9b3ab49c26b8cffa93b72c98ce869cf1c2ab16e9a702d3dfec55d6a299 +size 730112 diff --git 
a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png index 5b970f0c6e6add7a565595a679b81d8af5f6b7d3..0277f2cec7a3be0e087dab0f0faac51894324ed5 100644 --- a/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png +++ b/images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0b99a2687296af7d8fb57d902ad3deb34132fa928e9ca3fbf8c3bb26fb3fc53 -size 594607 +oid sha256:4c813646c456f9dc4bfe5cf8cb14c431fc27ddf4fe11be1aa3e666520d33c9fb +size 391097 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png index 80fa43921007928d8e6bbe9efeb61fcd1365ae02..76ed2c2378eba7e3bfdce0aac7a384acf6df14d9 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17336f735e3f3a9fbd7f6d4df757fe30190e4a1820d44402ae73054bbed08d1b -size 1850951 +oid sha256:70fe38bf448daf34f62a58e5f5ab977e33f40791a437e1383568741e3ba92e71 +size 2128987 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png index e038e22af092095b735d12a48b85cd2773253311..efa2507e0405f0bb10f4cf086d92890e2a1b348f 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e6b195c7c65489bb9a17e3792cd16f9e0b18a9940865ed9189f7b09067429a -size 853233 +oid sha256:cde14745ce02019f3ed8cf1a176cf8e036017f046bae811e0f4fd862464bd7d5 +size 739498 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png index b5e5aaf90e5c89d23402e66e46f3b5d456521d4d..99231771fe687e23d9883c6256440031bfbbda9d 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34172c696c8c724914df94f4fe01e1f57dae9dad827426953760d624ae009400 -size 871749 +oid sha256:1120b229272cc4160d0d8e0adf7197caafd75764b0ab9b2b180e2d2c79e78682 +size 323063 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png index e4a8ca34d555835be20420ed5b5ca75114ac4e42..77db9c1850a3969d7421a087c422b7d01a1e95fc 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:053dd3617aeeb0b711d93b822a35d18ffa92b145806e0f048fd60729d92b22e3 -size 1462206 +oid sha256:4cfa78fbb20461f11efa32beb7f610eb3dcd4690200528979c74d7dfa29109b6 +size 1402097 diff --git 
a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png index 9cd3d8b0db7413573914e732c7777af5f2dd373c..514fca1f7f65b89a102537bd9ecb31339d8f961e 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25f9914714c6b2f71bf83ead317ba76f6ac7a69a4176c6d07417824186a81ea5 -size 1655139 +oid sha256:e503f956394fcc921a2cddd100c6310e756c73fe27af03b3e96d14fc487489d3 +size 1352970 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png index 9050456526cd8382ac834844a3904446c3ec5330..9b29149e9f75ae02e874253dcea621da247263d6 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37ef4de9d4b15514c248cb3d2f795171c7eb1a1d840bb55e72e4c76ed64af497 -size 2036616 +oid sha256:3afe1200428841543b80676b5835e09238e23ef2bcc74a8ebf7453a9c69fc1d8 +size 1831656 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png index 9f5166acbb9f6a751594de70578363b0343c1ea7..3c8b2d36377a8c4b0f5049502ff581cf71cbeccf 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1340c203793f6f51d39c8c950f0f27f65210e952888c110534dcbfc36d2b85b0 -size 1748162 +oid sha256:e0819ce70c678d5603dc7040ae4ac62ef55c36cf99e4c563762c10d8e8ad3214 +size 1122563 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png index e11aeb42b8ef1861c4c9241134702b75f39237a9..f76db638a1e971f796ef5226f45a5873ba064608 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45fb741fe588858c6b8f5dac3f5c444a0cd3fef9d36d6ff4210e94ce821eb142 -size 1554355 +oid sha256:e703f21543529152c3c404ac042d51e50abbe73a20f9d1da57e12e8b99e68263 +size 2058475 diff --git a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png index 251a2007097de11b82d0b73b24c46e8422e526cf..c58fc833b9b2a1d131c2b1f2257a7d2370a1aa9a 100644 --- a/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png +++ b/images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a9b1b63a1605d1a41d9aa3ae93f4e84f3905d46a69d162924f407fd961d37c7 -size 1584409 +oid sha256:6e7349649ceb97c5b89d56aae1fd16d6fa1d7271189ad0e9281efa4f4632c137 +size 1926284 diff --git 
a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png index f31ea6c42fd8eb7623effbf60a3ae31eb9a37341..aae238454153d23a5ccd0d469197d55a5cfbcff3 100644 --- a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png +++ b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ba4d9d0bc8a886b32f6c6d7bc5fe74026e5acb20c458854651fa9c38c220e59 -size 2690609 +oid sha256:9fb7488d27267597e931efb1e1d9885251a35214bf01d595306700598074d9e7 +size 757864 diff --git a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png index f15ae9d369d1e771c943e68a4e51b945bbc967a8..6534361d45e2f13ce1ae33cb86402d84d406273a 100644 --- a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png +++ b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8da3a001fac7c3ff24e603172f6956310ef2d96b4048fb7c6b43e9505f8b2a9a -size 1664898 +oid sha256:af6d068b9aab81ba193fc1bc68dabace9cbf3645f4cca20fd325da703cee877c +size 1372473 diff --git a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png index ed67fd7977d98592fc3ddb102a75d97e62ce9360..c6cbd0c9c1a8942c38b3d1a82bff0d56d9c94e70 100644 --- a/images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png +++ b/images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eaeb25436f48f66dc7db67663316573d8485eb2a7a28834c73208f1d4b3acdb7 -size 1379412 +oid sha256:df346add860141c377585a8d3ae1f095d6ea2476288f7260f7a4f11340019177 +size 1315511 diff --git a/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png b/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png index 43f66256da324eb771eae6d77dd4a4d3daec0206..40ee02496fba6331fe1dcf24f219100e29ecbed1 100644 --- a/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png +++ b/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:559f895d2b4a13572acd93837924caf0a73a3df9ed64b6285e1d080eeb2e0dc0 -size 1009906 +oid sha256:37941e8d8d032337d58fb0197b4446290e703478028c5c92002bdfe61bf3f102 +size 535085 diff --git a/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png b/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png index e318cdd703ac16a1ac4f16a323231631126cf422..155abd600ae295637e7024ee85bdb4170ddb3e5e 100644 --- a/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png +++ b/images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d172017aa8657d61f8cae198cc4e8dd2285d48ccd3530da7457769f046b42b83 -size 1257836 +oid sha256:1549bf250c2312b70690f46c4dc604d9e7f5e8c0731570ac0c7c6bea6be85751 +size 988149 diff --git 
a/images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png b/images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png index 8b80a9b21a4d525eb05e0c1592649aba2276173d..af751146f5ae1d43b0bc84cab4cb3fb800927f1b 100644 --- a/images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png +++ b/images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab822e64064129df89227a3bae16a41041f519977111d8a190339b2e00fcdc25 -size 1927778 +oid sha256:3b83e9b2fb3232033be94707178c00598cf0bd1434bffc3f3b663f6ce84f05a0 +size 1583679 diff --git a/images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png b/images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png index 1b253f82087ad1419cf9546946ade4d7e62a35e4..d68511202abdd71cd21004a5d0f791d6de39d33b 100644 --- a/images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png +++ b/images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcfe63d89833a7bc4d604580c70df3c6ff80c44c16af950759d26053b3223193 -size 1252967 +oid sha256:674de58abfb72528cbea31de1d13433e72830fce29328dc2c02e5483908732b9 +size 1979252 diff --git a/images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png b/images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png index 6c0907518cc448dfb71a500e2a58d20495b018a5..14fb8acc86ac689a69d6accb226ea9c634f84e2a 100644 --- a/images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png +++ b/images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dacd68513b13859b2796dc4913ed20d9ca3881c85b7b41a931b6d9e7c306ce0a -size 787856 +oid sha256:cee772e4963424f3c42990493be013b889f62a157a34730be49b5c08fea2c703 +size 639297 diff --git a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png index 20283232294778fe60c3ef18bae23ad0ef10b105..355930bf42b0b853ef5e31fe67fecf4157e2ca9e 100644 --- a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png +++ b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7d0ee39a915c34d0088804f3cc757942d2efc8b2a5c5c68b3b8cafc6126574a -size 492921 +oid sha256:6a6d36a8b0b4607784aa664e5c6783af0dd62e7370dcf7a154ab61699f551118 +size 388002 diff --git a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png index beffbc0f7c4d06642f1c8f2d6665dc449a66b5ea..fa49e194d27726d385b8724338acbf6348284658 100644 --- a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png +++ b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23cdcf2271c6a5fba2759064d1338b75f39095205460eb2f2712d9b51155d9ab -size 749471 +oid sha256:9ff715dffe855a768b6456550ee8d2908125299f28f1a669e3fe7329107c5b97 +size 847023 diff --git 
a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png index 581f313227291ec0be98b27c502ecd53e3d1d221..a8f33384ecf9b04378d8386ab4276c236818aa00 100644 --- a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png +++ b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f25d8f73768ed11603c280948a858587935e5c12cb731cce81e5f187a08fbc13 -size 456636 +oid sha256:69f39f7ad81370925a34473408bc811b4ea3632745bc01337423250202d4b263 +size 616592 diff --git a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png index 042eca8b4fa6cef787f0f1b6e674e218f4af746a..ed6760d1b75b6e956947505e41d2dc12367b6da7 100644 --- a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png +++ b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5fc55bd57617db08788a31d443b916d49aa834f1ec315db0c11e8d53a7f82d2 -size 624403 +oid sha256:1d2b35da7915edfb775b1ec19586a577775a4aeedbd74df1df10a7de59216642 +size 456536 diff --git a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png index 3de465c604015052a2dc635abd56c67acdd9e1a7..841b767ab57db7e59c2e535e38bb38ea3e893a9b 100644 --- a/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png +++ b/images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91c4d26f57078b3db2cf71b094d0d58c201e7c645546c4b5c5b62bcbf09a2ef0 -size 1052301 +oid sha256:a177b9c15ded2b2ae2bd172e38d8eddb3371c185e8b630a859e3348f806c07b6 +size 1368764 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png index fa81f058b2228ca728bfeb624a89d24dc6fac954..594c6da41347f63a4be5e9e17fb603fbd993d8a4 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbf800862b6a6a9f135d3f3b0dacace10df40b7bdebaef4e8d193a4d13f5743b -size 2203183 +oid sha256:7fc6b78ea7e2ce9d37a77dcf5e59ce080c282f39a3e606bb41233d4d2f8bfab9 +size 2095334 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png index 6366bcc2731242700a7df052609674dc4b8f6155..19ebed310e768b7b1b47a458256d488ff271e14a 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:563cc4712918887484f952c288ce52d1169302bdc49e24cc4c5c3a0c39fb9dd2 -size 2426440 +oid sha256:7950cea2deef682d9c63fd0c8438184835017dd1365f02cdf93f832321a3eb6b +size 1390127 diff --git 
a/images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png index 9ac90ba4573dc2e1dce3acd5e87cce9a795638ec..b28d01f40bcb6c7da47ebae80d87be52e4d8131b 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b744ee23fd56af6a914366eb8f6875ee63f16a79b04e0c4b5f49dc8ea976e923 -size 2384723 +oid sha256:62bea3d17f9eb1496f6f8aa59ea57be4ce7096351f20520ac6edca563287d93b +size 2332440 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png index 524ee4d293a18d88437a6a55a7a0e27aea62193a..164213515ab2ddf17a4a36981e207a12c9440c3f 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4009855d877a4d7bccaee2cfd9f3c2eb14d2ad24fc408bdf68f845cce15416d1 -size 2432878 +oid sha256:998104546755d2adbe6823d0e2cf1484b45dae5115ccfdfc444159e26fb97c9f +size 2307958 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png index 48ed438e81c014a6302a795d050d20402b58234f..5b1b544775012f305441e624b1169740596bb33e 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89215b1f67fa106f51a82c15d14f3532280aec133224bcf300bf0040c5fd3d30 -size 2388792 +oid sha256:b55a5ddb6ef34cfdaef06a6c7dc22c9ed6823a987b0a5c0c15e49745f12313d1 +size 2471692 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png index 58dc416026db24b3f39c28ce9d2010a2570ad930..76be94b398f9728d1b1d12ef35d10829b88d9642 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5f4b1b4ca3874d49aba43e3aac22fa91ba7d2b20d2d13874220af47de46289e -size 2436385 +oid sha256:51bb71939ce71be6984c484dbfa2630fc1cb84fc821366591091b28d2ce1329e +size 2132903 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png index 4c80d8cdfb6c0d1fc31d45b438b7ae72695e6335..749117e47f8d9ed795a31a74eef4cea6e22a106f 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0129068dfecf203a3ff504be316bbd85285193ae50543412e73a26432e857649 -size 2359926 +oid sha256:43da7a7a93ec7e621e6e7c1b9196f45ebd9c59c877831c80ada01063d1883830 +size 1861755 diff --git 
a/images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png index 377d642580bae09c6e9f524d25341c7245d8864d..a52885c4459e299678f184a9b913c5fd8d907ed8 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:851ca7263053f988b47791661f4e88bbbe44d273d3aaec6d5928941e45ddaacb -size 2433141 +oid sha256:3201a54b2c7f3ab29a9cf7a6fc77ca5027b75b5b15d4ebdcfd297203898b9e3d +size 2348839 diff --git a/images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png b/images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png index 51e0bdc7203ade1e4ba5d40e155ff870227f615b..298481105228b89dd85f5db41392a9e54308bcb6 100644 --- a/images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png +++ b/images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:272fa633035652a033e93292bc1a6ad4c0dd1795d3334977a7c250716b1dc7e7 -size 2207112 +oid sha256:f481151d6f102793d205bfe2038d2f1aab245a6ce223468baa5e5a95edbf808d +size 2140224 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png index 85eec99496446e2d1b3e18bb2d64b70d877f81b7..4c5059038ceca1147bf342da8d1f5e72133ca212 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb14a6d0f102b43238c45dd5b0c2bf489c4332e086e953f0934b7d0414d155f1 -size 77582 +oid sha256:9097f194b73b5674e1302b4f3d6b98e8164504701fec10762229e172fb91795c +size 171693 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png index 3713fa14d07d843cea5f5c68918e5c6f921482ac..40b41628c4b74db9de57e4cae354af9fd6c5ab2c 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2eece751ccc1da4f2af3914e8799cf45223b5505f5ab545b32214c601c685501 -size 130365 +oid sha256:83f899e86abbc96663d65e8cc8644c5de7e8e20b8086af18b253f0deebff4606 +size 131763 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png index 76d6cca0adce7951a6552eb7749033e3a473b238..822e4ddc8f7af70acf8aa4bd202757c1add42872 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aca5ede284c502460309f540197d959bfb2d67353f7a459d6d0e65140b0a2e15 -size 544925 +oid sha256:1992e25a9f934db57adf15cf4b14f5c44e35955fafa26c56c4786ac620e9fd95 +size 492408 diff --git 
a/images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png index db9daa354311aaa133b124b9ce16a8d0fcf3e201..fa06213b1b7e6602fb616abd77d6b6d775d58893 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41ccf5346908a40acf4a36c8b249be78894dc76befd72199e223dfc973b6aa3b -size 407259 +oid sha256:1c143921399396f7576961a80c3e946e66e356c65a30dbe57edf5daffb39ef13 +size 1069232 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png index 6087c34db3edf5f3561fe4de02fe35f1c15c895a..7567ddd78c3c3d3a13672ff59d2db39fd2639782 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8f2d53e5742bbfc3402365b48379315cca2c25809460d7bae620c2cf4cf949c -size 376858 +oid sha256:06fba5279a74c06f64523b2aaca5f5ac99a861a6dcd0236444265115909ce25f +size 228355 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png index 85eec99496446e2d1b3e18bb2d64b70d877f81b7..01c8770f08364337549fb9f7e7e1246fac9e6f9e 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb14a6d0f102b43238c45dd5b0c2bf489c4332e086e953f0934b7d0414d155f1 -size 77582 +oid sha256:a89375146ab1e6e1e3191570b8e5e1582676a22c4d71ed49ef57fa74ebe0b19e +size 279204 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png index aa5334c4d38a4ca3d908ac1745e62cce0e46f332..2bc578c92f36a5e5d5bf9766805aec6bfe4d7658 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0c63c8ca3ef985b4e495065548e4c9550f6d4e2702b8b0ea90be4f8abcb9a92 -size 166624 +oid sha256:f5167eb8373ea0ad79d756c049e10f729f14275d43436e2a2c3c470dc793299e +size 220073 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png index 6287cdfec6ba9395267815f9c73c1f9690e1f739..156f795edbfde33b067967a6aaf746506fe07ebb 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a459bcae742a6e7aa32da4d7a15f52517d07d87adcf4d5c05ce1673d309662ef -size 83036 +oid sha256:bf8d32f0ce9d7e27cdd15dc5881107c5907b3aa29618e4e4fa3a9732448231df +size 191385 diff --git 
a/images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png index 85752f2602929ec6f1cf0915d467d90357d14508..20f5c94d0a24a2f31fbb5a4adc8f38034cd2f42b 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d7529cbc4179af83ae3f94faa7a10c4ee4c4fe871cb476483061dd35af0fe02 -size 82802 +oid sha256:72ffb3428fb0daa6176649dbe0f0a26707ff0e7cacf4ff59015571676f7d548a +size 257373 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png index 927f84b84cc7b91c7bd93ca2f7606c298a9d85e2..29619b28ba48aa6d2f6778d50a7fdb1287d5f15d 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43a1f301fd72b277e4efea83e2fda1d7879da9ab40a2996555f829e472eed8ff -size 832486 +oid sha256:3bc1d0673c9cc8829041026eea4bd580ae85967c9314ae0ee5918985b8a4e88c +size 756467 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png index 52ec5fb7aa1407daab1db8298b0bf762f0a17258..067d5f618540ac9b66fa5a672c003aadb21d79a5 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b60b99797c40f608f7fed2e44fa08a1b19b2bdab3f9b89c2ade05ebf79d8cf4 -size 467219 +oid sha256:91d357608a8a4915cccb6e36e1b094f262dddc9547d7570ffb546da354351e77 +size 311700 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png index 83eb26ba06899cccd0082104027f5d3cae1d4511..ba623c31bc158a7aedc27cada68428f5d50e61a1 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:375cde207669a74a566c08100335c4fc679f543e4fa5220f5367c554cdb91e43 -size 389532 +oid sha256:a9ec2b525ae019244f0d594a77b92863d85c19dc1ab7fbd724fca187cd67e93c +size 260002 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png index b7f7c873b01e1fc3ea02769686079bd157e264f9..bafac663cb0076da55db001596369ab750987405 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19b5d7a082d78ef0498ae5423a383c710ef46ddc7daa6974489a2599692e96fb -size 422847 +oid sha256:73ac3fbdb23c835d865bbeef6a9398d1da7c19b23840466f99f15cac32ebedd1 +size 266227 diff --git 
a/images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png index ed27f627676878ba5a85379e22a923f13d709bae..f53e4bd1338ddbc559a2e05bec3fae4b6e7c6ae9 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9891d39779ccf3a8f7db2a54dafce30bd81bcaf15c31b91fec644e97da7cf500 -size 496638 +oid sha256:02259bcc8217f6a09ecceb5ee86c853ffb6775a392bd33eb629e498d61602875 +size 372693 diff --git a/images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png b/images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png index d33384d405ce447dcafa7982ba40c91242eae341..b91dbdebeb368cf43156df33dfcc3e47e5882975 100644 --- a/images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png +++ b/images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:097a38280d278e68f159a421aea95a1b8c020a588976aa718a5f8d48aee843b6 -size 367507 +oid sha256:8bb10d0d48c407fb5d9b996b310d1e30da92487bbbd66139b5a590743087d07a +size 221855 diff --git a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png index c5808480eb177e66b24d386e2e4a69f4ffa164f1..494c2ff9f9bd8b2465834d1ffaecba55dc78ac8a 100644 --- a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png +++ b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58773569cbc569be9d3b36e8f7a458af89bfc1977dbfda5646cfd08364cfccbe -size 3281812 +oid sha256:0e7a2e82d8c0eb8a57b57753a77360e4629485754c7d019d9875ac75b370ee21 +size 2210495 diff --git a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png index c1d892b6f55f6c682b7f27318075d2a0e44ddf58..5a98af435e84b8b37bc291e7beaa2dec3f169540 100644 --- a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png +++ b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a160e1fdcc6e0a01cb02bc0fcd61b6a9105fb661089524507780e8d3cf840215 -size 3174092 +oid sha256:62ed5e45457980563227239b85c5e4f9287714c2b10af5a9998cdf3cb8f4a44a +size 1568156 diff --git a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png index 1aba2a4ed2d4ebbe4b874ca97c1b36eeb23cebf6..08320299074726dd21e1b187f50d388c5a63ca32 100644 --- a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png +++ b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bac9ee280776fdec975617955298bd02f54ee2fb276d73a28c3da82b49b41a4e -size 3281942 +oid sha256:15d0a9fa2e437fa3a0ea93893e0cbedd23f7a11b08307a98f977f1f1209f67d7 +size 1037470 diff --git 
a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png index f534d057bd098d1c5e1803c9501d15fa24aaa0e0..44c6e1ac8a7b44f0cb6fb7ff0391380e4e38ddf8 100644 --- a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png +++ b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92f994efaf62781f984ae3cc875833db8b1312ed1e9ded46013dd5fa354aff29 -size 3289251 +oid sha256:22c3b59ceec4f208186f373b9cbc37cbd59f617c5b54a083ce0b8bf386eb19a8 +size 2102269 diff --git a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png index 9c2d182dc7747e10cbcda59e97e7855bc71b31fa..51a2ac95a8d0f16f2a668a08fd723a14c9bd5ac1 100644 --- a/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png +++ b/images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1c669126e3e9a69664e82385ad7b5cd98f6fc03112bdaca5adbc911edc50c3b -size 3279570 +oid sha256:6d9ad0a0affcd7e0465b877bc5e878dc3d90547f49c233f93fb09ca51257594c +size 1824322 diff --git a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png index 1190dfbc7b79eca3178babd0e23bb7e0d9e3ac62..3008ae5c97bf652786160a8aab74dd7737dd0611 100644 --- a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png +++ b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:decd910009235a09a95a1f7461cf91619e24d55a78814402ceab0f8e5c243f26 -size 414947 +oid sha256:d025638c4fb72d5d9a73324d637d2092dded7c22bbecae17235d0418c3134084 +size 368760 diff --git a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png index 72f1cc2dd86ae37cc0b3778469090e202a313e2b..08ac5f931023af51db3d18d501b309e81c26cdcd 100644 --- a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png +++ b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a7ab9a09db69a41c0b65c19517c20359ab98e753ebd2a8d5df112f6f6d5b6af -size 898400 +oid sha256:8436344496b4e9a00b3c8acdd1f16fb9506d6957a9c278be3ff4e017a61c5e78 +size 894551 diff --git a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png index f1f7fe564a300291095bad86a041d4b536e1e9a4..4a631a03e92abc614b5d84a004705dbe53bd1c96 100644 --- a/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png +++ b/images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac2fd47051371c2efacbfee9a31bc4e462b85ea859597e53411eeec64283a281 -size 610792 +oid sha256:27506180fb7e31447e5ebcb7cd3cc000b757b9bdc7d2fc8c07dd263777e34280 +size 516356 diff --git 
a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png index 22f31dbd5425869b3c02d3dd3752623904319d71..38910768a08e58ccf5fc9a90a6ec48fabf66a0fa 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b5fd6307c4191bb6ee259d23256827274d4c87f2325a5e23890b3911ddc8f59 -size 1335852 +oid sha256:d210481d3913236c3a5c4330d473247685b1dd57a4990d9e76d2ecf05a3d0655 +size 1240839 diff --git a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png index 294711579d23970f33ccb220556bbe94c864afa9..da47f08769ae045d06f35fc60264543055d0f926 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d215c029c4e59fd59da76da4000bbc2837e6868f01d3cdd6f1223f86e0244bc -size 1590394 +oid sha256:44fc43c5a007d97909edc43c6ca2104f8463e546815224bc50d0fa6534b6cb8e +size 1562796 diff --git a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png index 726537970bc5fcbf1ab77b68dfa36bdd2ed7aa18..c4559f5a504e81ef5bb4c3d1b6d95db6174bd1ba 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90a00d571d258dc8bb3c4d3b824c870135ea5e22d443a05fd84b6ad48132a239 -size 1271947 +oid sha256:28bfb9084101d498fd58e11c1ff28a5f64166b274ac4cf06b524225d71ac05b3 +size 1040012 diff --git a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png index f6015d7e5ac80029a1da5325b4d29ab3af4579b6..c95ed127c93de479bda977de9a23727b96fb8e52 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a84a1e560e134ce86700cfc76e97fe81b1f5df82a3fc4ebcde7b5400b0dc5a9 -size 944117 +oid sha256:43972b3dec852a929e8e5466e0c774fb9dcf67b565a9c94a31e68387bbe47bd9 +size 768292 diff --git a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png index fdc7238f444a9f676e7c2a0d34d0bcc7487972f4..5f6a6b8fb6b87f75cbb6230bdfde753d0d3f6484 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:436787b5957e04d03232f422f72139dfffef44cd6f91c15e6f3368af303f3ecc -size 1275728 +oid sha256:5bec18bb70679a9035fa81085fa3687f9123509559755838dd2f40fd2b890efa +size 1113573 diff --git 
a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png index 98b510cca2af74e26c65eede3446e3df8dc22627..02897083f7017bbdcb8736ef810c973bd1508254 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d3a2c9f7fc22745e69a9958acad0533c8ee3d2c9513654ac5a6816d27f2cb97 -size 1145762 +oid sha256:d34ec6146543e01fb23cebd28c8744a9bd2d459e7710711f60b09b29733afbb8 +size 505192 diff --git a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png index f1244989ddd215d343cdb165dfafe7813eb4e98b..256fadcb2ce26c3fcc1fe8721bd27900347c35f4 100644 --- a/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png +++ b/images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:494d4194f77105afa9050ad2e1f0034fb82f59b923afd52479c753855f597aa8 -size 1313961 +oid sha256:8b67c763637dd68758b573bb33d2b4005f1ea708363a5e1b205711f0d49c8f9c +size 1344799 diff --git a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png index b6e127a41997ac7d8dae5bcae320916c68dffa73..062fae9c4434f98447a4344365ffdd5bd98f068c 100644 --- a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png +++ b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d05cbe3597aa811e857d1242c3efb4bb1f1c1a3483415b46fd30fff4d34b8094 -size 2692512 +oid sha256:438d450ee95b73891a564cd142f9e77d87ce9e93dd56c545b2f94f64b76b35ec +size 2743315 diff --git a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png index 0af1211773ded8c581fc193f5ad325a2fdead76a..e49fee2c007cbc8cf26bf592ef4b3a34c810d0a7 100644 --- a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png +++ b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d71dcbff6425d9946d8f932d5c18b7f284a172f3cb1aac56b6bdadb2ad9d748 -size 1822200 +oid sha256:e0fe369f2c5af55b629cb37ac3e7660f2cf5607733ba3105ad0554cf08f67a99 +size 2013731 diff --git a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png index 744b858183e708799c93b3744050e3b0db91d265..5d61afb4dc859c0a228bae52bd0cca31f059c891 100644 --- a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png +++ b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5fbaaee15590922ea4073f27a71e5026f71e89cca895238d216852daba33bec -size 2410034 +oid sha256:b2d3a272b0c596b8db240738f23eb1446a9c9ba5b70800070817dd11db65e586 +size 2567474 diff --git 
a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png index 9dd941753b37cb854b0ddc87e627b2d67fe1cdac..24ce3462971aae4d959e2b4548a548a432936f4a 100644 --- a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png +++ b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36cab97b8aebc7033e035cd5e5c5c5e5cd20a914e575402475f019a36358c33a -size 1314685 +oid sha256:a7194d6d48c16035b0ba4b92d7fa1038c2b8bb35a9fdc66d31804ba880f066b6 +size 1314949 diff --git a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png index dd9b540b031b0acced888e6df08ce94176ba379e..1aa04683df05c46fb804367ab1dac6fc5906b182 100644 --- a/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png +++ b/images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a661aecca386084528a7e234e9f83ebc7076633a3324c64c82a88aab473ccde8 -size 2019578 +oid sha256:210a445fbae064f2eb22f159c856926052a46bdacec0ef17f74a687816f4d7dd +size 1460836 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png index 2c5a862d3f39c964d7a364ad4e317fb3eb7adaff..6f9bc231357687fdc13b30b980ae5cf476348de0 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1daaef7ed2c8941659005fe05baa5762b0b9396975413863316444232f35f3b6 -size 784651 +oid sha256:d35a0ba170cd3d56a86a90332837ce6c32766fcdae0abe1811eeafe3c934fc09 +size 1012977 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png index 20670d1567c55b412e9e5ad805d4a03dd986e3ec..9a367c51a7bc04e38ed00ca5cd1f93883e362ceb 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:977bf83a3db5223c7f092b52925110c8408b5b7e31532bd2ad02c189ec1da8bb -size 963824 +oid sha256:d89b7b68ac9d6012889ed77c548ef8dfe0f51aa6441b999c48fd2952d706b3e3 +size 1203626 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png index ba26c48ee91f2d38512d069cac0d58d116d8ab4f..19c99d74fcbb0f79e21678552b45cb456f9bbac4 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed695941f501100ae4e937f107a6c9ec5693910b314cfcb8d66dad51d7d2c5f4 -size 1230966 +oid sha256:c6307ad7c56bc571008c57bee50f8b04282509ff605ada6011ff3ec2f75f3989 +size 519302 diff --git 
a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png index 153813197072075d58b8a3742fd6b368f0d611a0..af8a1d46099b05964e40f2e1a4d0b1cdd8d02aa8 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2634c380c7be8fa85645ca356b5579bd361e50a558078b7dd8f40b09c78c7fee -size 774849 +oid sha256:d8e4f1d94139dc9e05ff21755c9fa231d24844f9a3d733603d54ce5a29e202a6 +size 890893 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png index f3757d53f795da3e29ad558c56c1ebcb95d3459c..26fd8f84ec6e5177ca2a631d91c4ae416cb75b95 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2dbbcf9828ba6a4ddc6396a0b7de001378714bbb88b7ba8cf599ff3ed50598b -size 765765 +oid sha256:ab0586fe702100b8ce115b2967670aeef2fd021ff5a87c02d9cc1a6f061fcb4d +size 1449549 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png index d19718cc1a29f107757e623fa5c409a722ca91c8..8f99d512cf0e7bcd77b5d5a2c7aff366d84337de 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f324628096956d75c3d17ce7fc9bb70406295bb0d1876bdd1e5a5d507f3d2ca -size 1916346 +oid sha256:0a358877c2b960014e5efe862d9745f9ea2e09997611bf067d65bbfa96764cfa +size 1692854 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png index b717b522385da9f6230fa34f6de23d4902ab8646..bdf1ad06f3ec4f493463f12ee6999ac96c937ef5 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d235430a19c9b01133021aab83fb9eb73f194912487d4fdae3dc99e814a6f0a7 -size 776984 +oid sha256:888de85d948f3ec2f9814edeecad18b522e5863852473e58df719ace896cfad9 +size 788666 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png index ae4ffcf9826d9f2ce47dd64a7c49bd187703697e..b95049e08801e36466a05538558636ab980d0d41 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9a7123636887eb9acbd5ed46e180987ef87dcba539357b2b5424162c975c54c -size 1168007 +oid sha256:12b2e8993b0f80f30405a09aaa5b59d99573fc66e163a2f49b121e12d9b95370 +size 1295479 diff --git 
a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png index 937d1095fbf728104646ef675415b58d6822ad2d..1ba4da6fbc4c3e19861d657653167d8eb27088ba 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00792d8b0c4b36607f41001985e73f6151008011217c170f8a24a52ecc7a2ea8 -size 745945 +oid sha256:887fa0fe26313bcffd9a37ececd4404332c037ed06fbd21f7ef274b479d56b7a +size 658305 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png index f25fe99de2d2a9095286694788c2a2b3e301ca2b..d17b71e4b2f5b37ade9b3859acef5f45a6da9b3b 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c78386bf620087b8340c1300eb7142e6eeaa1c11652ee2afe5874461c0f34258 -size 784557 +oid sha256:6c9d76dc04b853bb91c530ed30532bc4fd3148776f383cc21ee43b79857a3b19 +size 869493 diff --git a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png index 71bbb07b122852917ee033d6aefeffd7d86e65d8..aa8f39f134bbc69c30a0cf14b416575b5b327ad9 100644 --- a/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png +++ b/images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:837f19485b42daf6b7e6597485e3a1b5a99175697e990331724f624f7c944f40 -size 771022 +oid sha256:9636ee223096a300e5bd64666f5d3ab0c9b45eb36ccb8fe6fff9e14ae551d0c8 +size 724686 diff --git a/images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png b/images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png index 05f52d2b205449c74b7c1d2211c384959dd5594c..b390f76f3636fa489ce7a97dcf9dbe20e30fe425 100644 --- a/images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png +++ b/images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:512b1195785ea9ed8c232c1c9086e2c4b4593f4170813634a2aed2b16057085f -size 891115 +oid sha256:c41b12f9562e87e0c69bb99239396012423405d93c22955b2a98a28a70c6c8bf +size 1237874 diff --git a/images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png b/images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png index 9f629e1f9095596163436c639262f04f3b49a292..2e5dd82fcdca090657e0769d59423c9cef834a70 100644 --- a/images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png +++ b/images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:457a3b4c2cd7c3d930dea6ca90e480365814f4d926caca0b332718b547445a58 -size 1101048 +oid sha256:6c2287dab000ff119d3726566af4c5ebef0df80f6cf38a53b434172b5455daac +size 1211159 diff --git 
a/images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png index acf0cbc75539273e8f224e32311b8daafb59a0a0..da5e10beda1b136515312c268e961ce515429bb8 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df7447f3e48b023e75312eb340f231f3c5ba3e9ef5c59adc84ba330f919c06c6 -size 1053012 +oid sha256:4ebafdc8d82dbc7379f9d5ea91a28cc49006d2f548b7c0c058f8d9c57cb85b41 +size 1125008 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png index 8d7e30bd0a6ceb54003e2272d70ff6e20622544c..2f2c244895a77d3e68e84953c47ac997026036ee 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e555d9ba7792edd371cfdcac06386f9ed6b8f930da1ea678f406c98adf65c6fb -size 1120061 +oid sha256:b6c9a46e5b6e691b59c6af1a265219ab9d0bd78bdc5ca5cf630ac59e95447516 +size 785016 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png index a22a003a31fd06ff71538d3b04251fff382c8026..f9a0f1ac4d19de1d4d44cdf6ade68ed2acfea8c5 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcf5bc9779f3b458c16e6a04d8c22a499ce3c13980a8ae5416e4b15de3d5d448 -size 834418 +oid sha256:b8a4686d4833d517fb96978fbdd02b686d9c44c0c66ac716b84dae7c30b2ae6a +size 871737 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png index d03ba8603f7ed6e1296904a891300850de8481ff..2a7f40c246284e417f98ea9e1242b0288278e419 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5485d4f6d3170a876f7b678cc78ca7146de0b84560b249acb7bf26cf36bbdc8e -size 846045 +oid sha256:fb290ab0e6f0ea01ea1d07e92b110cd54ab1f143186b0bee020a0af4fad8fb67 +size 931975 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png index 6ccd3067485ff8b01fa7a11eb5d38743f5c84dff..1470dc7fda21af3fa0ed33dc6c0452b8b9c7d3e9 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:320f05512cb88b41ba9a94da76a9523c6e053f362b7b2c696f0858a51681c05d -size 1149236 +oid sha256:afa8716e4edb89cc954d10c212afb62a2cbb5be02da3a30c621155d1ab123f3a +size 1275329 diff --git 
a/images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png index 45f3c7ccb3cb992863bd44dfbdeb69425b183da9..4864e6b85b5595dacdc704c50f2f7dd90b7afe73 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abb7ef6a4bdb6fe209742d7c089a8e3a68f13e46c64417c9b5ce76e37401e1c8 -size 388604 +oid sha256:adb9108f3db002ceff1473b0f61f4ab778cec78560456e39d0ca7982dd675cb1 +size 510078 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png index 42fd1f395518341cd38c99e8a12511f2e5ba9830..5361dc8f2c711fd19c93c42aa5ed4819d88ea834 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56e0b0962434b1c441fe5a4d9ca1fbb27f3df84eaab431040b499661f8a7b0fb -size 1071382 +oid sha256:b4942966785dd6982a9056bcb8f90cd1d121f550777ceea29c62f42bd8979106 +size 729989 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png index 731293ce45a2106763927def342e49d593f9facd..c3a78657a7c45cc598401c3d22f167bb138510d4 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e7706139a7e0f1e4a823cdaea8a21b7009dca0c2dabe920495a280980499e58 -size 1149699 +oid sha256:ff04163b68b8c5a97a26e069261602267573ec103a688936950199d8009508ed +size 689213 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png index 6955ee773ac23cf47bb0c384c7e26dec1affde8e..04fab981beb040fff39ed4c57ca6c79776fd78ee 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:068856d5de4132e5a5218ee7e051b8cb790e3ac8213bd57634b8cbb1ae76783d -size 1070853 +oid sha256:7a9f618ad1fcad5b259fde829a4c9a06d19e64d9104a2f9377c0e1667b6a1bdf +size 1055038 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png index ea89df6f8ddf41a0b11cada1e73bd8d6b39fe02e..e6f6495fcd8ee2025017c27afc79ca10f96a8e8e 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2d83308cab16d79dfa57b019693e19d5c245b65e4bea9d39e3759e1949c86d2 -size 913403 +oid sha256:d5e1bd90d3ae8fbef0da05d796e8267443c4471f0cd563f522163dcf8ab74e72 +size 925306 diff --git 
a/images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png index 7493c5837c3e62645e14fae8b5e8242e1f7ad85f..49982366ec05b06477958838205454b596beed77 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:531cfb6f52b48c24070985360a9c28dac567a3e98a31feb3bd8bfea7abb8428e -size 378225 +oid sha256:046d1bbffcd3d5168e1834f9cd796f4b413cf9bec8229ddb606418e820b7b865 +size 543195 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png index 51e2e535951c7e550351c54e65bd585bb2983397..59f683fd0ad41ef416071d81c13eb57289fe3882 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83a2b9ae5b8a257971a0d77af22cf59fead449950e0fcaa9cec7dce764888525 -size 400948 +oid sha256:3f7dcb5fb4a67cf812ba59771830e8353d124f8463492a54ab27cd18758954a8 +size 439276 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png index 640e00e7c97faa2f95ebe3c3b8387b6745d96b88..b5861dafbe96a6aedb36ea174b51b5112ec714cc 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1eb0326698740d0519f712bdebc53e40418bd885b9a52e9e3c3ebe50a216a54 -size 834355 +oid sha256:85c0cabe9523bd0881dfa36fd741c319cc031e36ab16a5019c6264da9b5b8834 +size 998722 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png index 736a04c511f38aea013c11edbb32e4e87dd3545e..cae9f15317df37b9edf849259ee1512c5ccc65fe 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17294b1f56336b60044616898ad2fc9334869db51d0a02767845df5e964475e3 -size 1137944 +oid sha256:358e5c6bd277319c635e69dc4d49fa21fc61e5e0f093e9f9ba9537be130a7d63 +size 763921 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png index 57e1c5000393e373b3cd0cb37abe0fb4c757f084..7687d23744f9a44b03fc580bce1c8b44c1fc1310 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b8bd4c761f44e1316840695daf3b12622fb980f075db6e443af37021063b7d3 -size 496436 +oid sha256:f7509644a110f330ee527e9505267ee2a1b946a732b906a3514b77297a30bc9d +size 611274 diff --git 
a/images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png index 883183967d51273de86ffe4a96ef6ffdc415ae27..c0f4ba78a7807926190131bad2352daedb450586 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d463acf80933740721fbdd66ba0ea36a99dbd4d7d7aab6b8613e0fe40e47441 -size 841465 +oid sha256:4933f43d7100a5b222920ec21510ee6f388b84002433bb4e786a78856cea5c11 +size 903528 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png index a9c7772ec923513d856d740c3117f39676dbc2f9..10749215a378010758371489e5a8b20779e64f6c 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5586c38cee5fd43d1a00efc914e938522c399dd5649c058acc48f3b5c45e99d9 -size 399693 +oid sha256:fe8eb339049426d2d4491fbcdbcf96599be67c76510380a620b22ba891aae218 +size 504658 diff --git a/images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png b/images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png index df331ffe4db64ee0b209c6251c0beec0b4f634f0..17ae4f665d7c8aac3a0d65d6cc038a25aa9f9981 100644 --- a/images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png +++ b/images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6a8bee78b4bffcce683365e6eaa827a3dcb9c6df8bc996e32aa6e5cfa01ebe7 -size 1125869 +oid sha256:90ce81b6f7cd3ffcd06afd74447c01df3b356eb473a28c55b22c9499ca330b29 +size 934366 diff --git a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png index 099d03e64305b4dc96d2a4b8f4d673c628d7f9df..6ccce5eae34067ef5258fa77d0112a1dff5e68c1 100644 --- a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png +++ b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6775110eaae45210089fd1951e2c4e38d85a61dd1b52365addd49fabfc2d3399 -size 1474024 +oid sha256:4bad43766c6eda2bd95c463e7666dc60c826d637e85eab0c096b96f0cbc6fd04 +size 675718 diff --git a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png index 278d380e8466e3bb4868f9cda005c300ad46cbac..b1b90f943a595e400571f5a7faa38bbbbe8d78fc 100644 --- a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png +++ b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5907b18dc742b22160500a55ceab2f5e972c3af1dfbd51634cd69463dd6b08c0 -size 1476792 +oid sha256:e910add4d1d000c4c69528b00d6409d2897a085813aec93dfb9f5cef71c849f2 +size 1212021 diff --git 
a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png index 3f6902e1ca08689a575f024b7631866002b82cc3..38971f79f6fff988c122b82bd41e5adf404efd14 100644 --- a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png +++ b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67411056d69470158fe43deb10f6a5b0ef7dda7a3edf06e010484c8d49db2125 -size 1883880 +oid sha256:4f49ddacfb548c4bf7815ee08ad7c15de1dfb861824ff60f9666a5703b1f6135 +size 1037628 diff --git a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png index 0ca04052ad04f289e8662befe6b4361b4e83debc..3c945de876a7bc9f404ef5a3c5d093b87ae4bf52 100644 --- a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png +++ b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09790f52246a4228818ec3a2147a2de061cef7f529488810c97f98d5ea24ecf0 -size 2392199 +oid sha256:65da1fed627596c534265218cd319d1cf58ef16a401e86b5f987daa3b4936a62 +size 2496130 diff --git a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png index 1e5dec02ca95d297bea7b729a553396ba031f81e..bbbc1647960ab3af3b8aa7b35ae61f669d0b2e1b 100644 --- a/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png +++ b/images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06b3406ac1d9543e45b035c23f1d803cd69099e9097c2e6c21bde5ecff70098e -size 2660875 +oid sha256:dd1768fbf34e5b29f20a8db2ba568188c43bd75d839e8381cce4fe277d1d6a9c +size 2893013 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png index d701130374812e281dba59b122fd5c3a7909d9fe..1d62c527346f351779fd01ef0aeaccdfd351558e 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1bb097f0a2645862febb104a8bc2333cc3e9b253742169b734070dbe88ca29e -size 1101736 +oid sha256:38991c1e013598280b5486f95e8357fc1507e5ac9a62837fca01821b2ab092d3 +size 775576 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png index 7497aedaa6c509241d1efb9c00a64fdcd65a4e54..af9a3378622f8cd10642f6e68c2c78ef57990d1c 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a7bb41fd362acd56fd75dd4e569a1a9c685c80ea9f055acbeae5fe46417548e -size 1071509 +oid sha256:51b79d406ef66cb2f48f779a99ce0b43e57597f4979eb6d81a4a935590b20e47 +size 884186 diff --git 
a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png index ea748d1c8547c316cc6bf16311ed8d5a2fce6414..945a919bea0efcf0ccf756d97de87f1c42a214f6 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:569304bfaa8b2e1f710d5994f0185368f8a57a836940a94c7b223179e634a57d -size 1462592 +oid sha256:d39628fb498ed349f1ea52eaf026770f31ed4a936b06cde1f14d424299cc5d70 +size 911153 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png index efcee54da969856cd75d9cd98325a5757804501b..59fcd51c280329e70242f150d6b7c9c64958a074 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5143402369a850a20fb1051d2fe79e85948ad2cbbb5bbf21b6a03f4ad61fd4db -size 732460 +oid sha256:6c1d3eee8abe83e7a25eeec9d949932826942ea8893cedf92a7f70cc48da3d27 +size 1125028 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png index 8dba260baf4868354de77f31419e66ab495c4bda..37ab61809896c36a98f84024c4375184fd2cba4d 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cda64dc3c002bf4807a457a70717ba7093c783de06bb9d828cc98a84bbd2c333 -size 1105446 +oid sha256:39d6b32f82f5a7b8145fc80f0e41fa141b867fd7dc347792e4b008b23e0b1760 +size 1492542 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png index c5701cf16c67e500af3e5c4b6521f7039973b392..7b81f7eaffeca481362ef7cad21ead4ef64a4937 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ab5950f3440d970d5b7a54ba4427ace3707358943270bf1d6fc0d1687a1c3aa -size 988106 +oid sha256:341005cf40b7247fc9cdf1d27425596923b08fc51354a973626a9625e6ba10dc +size 1433495 diff --git a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png index c5e5f233ab780a2b83012beca4f2ece6d2c503ce..c69b41115c8624f42097e2e2af1b0991eddb2a24 100644 --- a/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png +++ b/images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b0d9b509f4350fb89b965897f3a228f496aef3f0332edebc635d18c1fd1aae2 -size 1483944 +oid sha256:a17a3393e1c72e609b5984130e7bc850687ef3842a38ff25af479ebf19d535af +size 1416929 diff --git 
a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png index 1098f7b8b68bc89b5ee5d3f4686f4a1335da5372..ce441ba263c0e4fca8ec7178cc7320254fce4992 100644 --- a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png +++ b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f8ccdbce0d382263b82921dd314d85b2c36eb07e64b4ed3ffaf177ae7dd72f6 -size 627927 +oid sha256:7fed26ce6e0c6f70e872276e272e6d5f098f120c6e8f25a8d8cf7272c825996d +size 805700 diff --git a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png index f21784bb1fc3973039199ff81b9937d1abbe143b..9cfbe2fcf7d9711ce3d344734ff64aeb8bab48ae 100644 --- a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png +++ b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa3bbd2c9ef01ece8d51d7b35551402f8d4fe0955d39bfa1fc2a6eee2792eaf1 -size 592253 +oid sha256:9be769e276a4ac5593568ceec31461d11e1db5fe4eb0422b153afc2f191686b4 +size 676225 diff --git a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png index b58d9a6d683e1400224f345d49970b96e9d12102..96eb67beff4f7908ed84b909895fa1baabc9cad1 100644 --- a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png +++ b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4697f9fe71cfc10f2344bb6c9361599e11e988e1dd0e414bb293924f1d671a79 -size 632429 +oid sha256:7699009e41f51bfc9043db81e2e80b874efc53e68f11c6f9489bc8b7d719d61e +size 796385 diff --git a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png index 0a558a1ee140cc4bc5c02e077ffba42b14e4edac..2e45ebb07480659e7ffcc2adc12c8fc3c5824db4 100644 --- a/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png +++ b/images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3d954b41ad24a4c8e705c5d7c68298fb7137945016ffe214ac7a320569eeb72 -size 600454 +oid sha256:9cc49f0488085404325d1cf9b5a91c6409045288327de676b97de8a038c44c04 +size 654546 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png index d8b780af370e645bdcd35b366ad40b877acdf171..1479b2d08127ce706c7b9e6c9fdb75c9533c2496 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:954a1b0102509c2826406d003204032b46b2e375c2889ac717fb24f38336c39a -size 2087151 +oid sha256:9a955caa9bceab22ba70f16cecd082ec8dbec4af36ce8a108d15e5691f9c9e04 +size 1214834 diff --git 
a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png index a5671a52a39a5542bef36a33ecf23c91487819cb..51b4595f190903470aaf61fcd7d89ed93f583f99 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e19c405303a1f7904ee57529fe744f4fe0b9e1eb5b587c1ee92de5ffbf0e9989 -size 1021578 +oid sha256:19d287010eba3120542f7866ed614affc6499488776e0fdbe6a1e22533fff2e5 +size 702380 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png index b4ed3615187c9d9800e97db6be479ff84cacef43..25ce4b89ce737525f76afb0a85452c7e52a711d8 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:453ad67e696596a13ee845134dca49ca877966faac1966142d0ecf2f6ac638d4 -size 1106228 +oid sha256:15a227bad4bf6f1f828f0d41cd3fe46373280371ba00df000bbf0f957275bfc2 +size 1253093 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png index 632e7e3e9bb9a5328db3410aa8337a78337eae46..ad85a275a38ac77641a25da58215c1915f51751e 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81bf1539dcb5b70c27170177d3aed0ddf96963eb030064b605b4729606a4e01a -size 383753 +oid sha256:831750b3333faa59f47c0e156b1c6eb2aa3d605a659bcd179f2573890faa1cb6 +size 433065 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png index 1f2acc79bbfe87e1a6b78e74b4ddb6cc013f35a6..d0a7974f7294f653e333e391e2996965d627f0a3 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80e92696a94f1cb302b3e97fa215672c583ba3e82324f9ff2f59f3d74900c33c -size 2161436 +oid sha256:f7cde7bcae5da29cc6e239a4a9fafd999ffa8675ea4c3f841ba71ffe255371b5 +size 937170 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png index 2a09434f3258861d3f3f704dca1df4c0fcabb74c..d4ec0e90f5178dbcd9151badd3659b636885d35c 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81190823240df9659e086280edcb2ea2cd2725cd4375f40f11d46119b6051b91 -size 676976 +oid sha256:3f902c6a016636e5735933889e62c6165c8e11b1e9d4085d6806bd0a69aac1c7 +size 692933 diff --git 
a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png index 1bfc0b02ffa7a0fc2af855b81e7d176871c61aa5..8783ca001228e5fafffc4cfd536b94770b7f95ef 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:948c982f1353be0ed1a0d1d33334232089795ec0936bd16e326119b5e0151b89 -size 809182 +oid sha256:9d7c57a6ec7d7a922fac2f1b6d3979eb2b8c749cfe95b7e2bf30019b940db066 +size 410244 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png index 4f8814b0c7ffe6528238469f259bdc80bc4b3f13..f2701449bdaae0e2f867e8bd633744667023aceb 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa584a9843d8ee5e4fec1c3f0e2d0d5b294df594e78c0ed0be4a8ef1bf4a8550 -size 473530 +oid sha256:3e5bb78c7f5954c20191fc105d52505c3078bdfd86b4368acf2d33ff3e2e89c2 +size 263410 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png index baf32848d5606c0ddb86ef2ec8ceaf0375406611..e14b1dbdaf984befe11aacf3df0a115ea52b6be7 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e517ff3844f342476adfb7eeb3f33dad7b744508b3d115edea22698734e514db -size 312938 +oid sha256:cd30f92606951d6e2018425cf1962019c6516bdf9ec577546fb94262babb9066 +size 313178 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png index c028053e794a5868be9d62668ef1c07dfc7883ce..068694c26d87d782764220799a6da203e9f09b8c 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c012b5cd49b70f6c3f70c6c654398736c0d385729eeab80d5a16cbe28af87bd8 -size 1118013 +oid sha256:b839ea53d3c9d1b65ec21bffb1f1e2802387a3c42870acd2e34280d9053c9404 +size 560282 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png index 439fa7defa93d2a15ec94ba325386f7f7b2a1caa..6d1bbe334248811029237faf3b2bd0a7e040e014 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c08aa41a650a72ce84d64280899ff781c864a462c7776fc362c907af645cf26 -size 423887 +oid sha256:ee389972c67fa908153d5d81abfd638893a89e220a7c51ad9456e5027a71ab40 +size 466419 diff --git 
a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png index ea7cc5fe79941ab7605f5e6a2a6f484371ab5887..2d2ed3af366149c13c0a5277a7d8e38c752ef551 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0465048a0bee7917f07b79127693737008b501f45fa0868017fd4fbe28caef2a -size 835325 +oid sha256:84fbc6ccd37ed8e84cabaec3ffa0e54797cb2f06cd27954379c1403fdbae79b5 +size 398021 diff --git a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png index dd5cecfcb1c8782699e3c7f0404b8ecc5c7b1541..0856bfade866f85686c591c273c443b98fea11aa 100644 --- a/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png +++ b/images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d0e59bbc60e2fe89435db3098c6a9ce752621f10ac93ea360d45fc3357eb9e1 -size 976606 +oid sha256:c092fc8e8b24ed8ad6f7fa66069d7b6dc395fb7ebde770d34b6ce9222b599ab4 +size 1031911 diff --git a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png index d1d20da6e8ceb8edcbbff1adec916bc2533748f2..86ef6cc1cd602c88f8bb528cea6b4e8ff4def8b1 100644 --- a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png +++ b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72e1a5b5105da1a89895f197c8f3a402efa0bfcd8b66c1f57119bd547415439b -size 728746 +oid sha256:f1589d6225dbf90b8c7e1cd8ee4990fd07482dacde08837c69e41ebeb04367eb +size 894739 diff --git a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png index 21af1bb89c8754904ec0119fa2bd06c0d393ce90..12db01198c72d63aa7ca2f196d860c537dec23fa 100644 --- a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png +++ b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3d7f2604454d9e0354af06290530c1873e386ddd7b3a7a2aabad8b48bc5335c -size 894385 +oid sha256:4c2d09ab7632aeb259da40fff9b92e15f71360413469f81faec38c9203f869f4 +size 863604 diff --git a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png index d0ce07557f2f4482a8e7218e6f02358ffe2b57b0..b846107e080367553a1c9d6b408d02b8dbee9337 100644 --- a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png +++ b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:886bca8fc5a27dc89691eb63437ee5a7946fd2f611ff64edf04a63f719917bfd -size 729583 +oid sha256:d370d3fc63ea3dad946209f4e3bba0c2a9082e0c1a36e4f9107404e3e804df73 +size 713622 diff --git 
a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png index 06556a0aadd93b79055ce0e31870e2773b4f39a5..bd68f6c38d3292ee78b651c719ed114f797910ae 100644 --- a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png +++ b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e033ceb2884ae466ae69fee3602ae2cb2cb4b373a14ad25de873f2d5e322d0e0 -size 763055 +oid sha256:dd3d10193a9c86a8eb01b80da552a125410ad867ecced0d74be7f1b49101e489 +size 670273 diff --git a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png index dd0fb2b70895d3ac25eaca428d123a208710e5cc..49536edf310bb10fcd5fdcb4459f091656fe072c 100644 --- a/images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png +++ b/images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bd227a0f7344a65f372dadfe9e8c5c0c8a2e663c257804275d085f82cfdd949 -size 894425 +oid sha256:e4f84aa267f184b6ffc64a84308de18c5390d89adcc29798ea5076fdc8df40a2 +size 1146340 diff --git a/images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png b/images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png index bbc1c2da4fede372ed66222a9e66554e36fee837..cf00c3783ee6d4b1d73035995bbf7339c1bc64b6 100644 --- a/images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png +++ b/images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e506d61552cb10502c94d146cba9a5592871f24a1ac83939db280b8179c3e8e1 -size 1706923 +oid sha256:5bca5eeb446e1ab000f5a57df933a319990fd2185ce91fdbf7a44027da945b7d +size 1587684 diff --git a/images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png b/images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png index 2d7bd5df42c7ed78f1f4dd877f30afc75758afa9..03e78a82a13d407992c7e28036f9041beeeb439c 100644 --- a/images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png +++ b/images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fab98029d36c3291168c08d0938a430eb391c824cad3d364220f1629a0011de -size 1745510 +oid sha256:83bb657468fd313d0033834360cf3cfe73fc68aceb3c65e5cf627a6e6b320689 +size 838195 diff --git a/images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png b/images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png index 8176aeb2dc54ee38623773c05bc14a2da10bb578..b21ad96ba02d084e319d41375e813e800c7a8629 100644 --- a/images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png +++ b/images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee3d429149ed10def84dc67765ef35e4426d1ca6f5c9c6520bd20e7ac1e8c6de -size 1128223 +oid sha256:30cdcc272cde86faede68fa3b005e23404e26b2028b14bd4f95af93b89c5c6c7 +size 1134135 diff --git 
a/images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png b/images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png index 2bdb4106a52eeba11b8d63b8c6c3010bbf53475d..f60d03a1d199130cb5d3e867bbb01601c1ab0779 100644 --- a/images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png +++ b/images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c31382d389cb8d01e10e0fa831eba933035d66b777d8fcc1b7844bf02d4b5b5a -size 982085 +oid sha256:d430cc8381f0132bfc8a12afe3c2cec20863ab567ef3ff1b894af4240a6d1696 +size 894114 diff --git a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png index 6ab4db4c01ef227c15a7ffd6f37dc0b2231ba2b4..e2b6630038418cb89dc3dfc60812cf6cb44e7060 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e14599e9a88869153c3e51cef47837a3ed351e5e6df90d09ff10ffc9da84eb8d -size 1325396 +oid sha256:dffb12f2593d72a24de4082aa0b2aa4a306e34b36ce9b34ed0c27f7bd5d9c4b8 +size 1117061 diff --git a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png index 7a346f66f8538814dd40fa848a3ed7ce457474f1..bee2c7978b4cdd65e64840df7af81d23acfb0cfd 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cbbf2719f13738639a0370dfe37e3ba2eac0cdc823938fc026f70968fcb8dd0 -size 545749 +oid sha256:b1ec894bb5ededcff0865483a0312370305f842218bc9c6ff1da617b7bf5c530 +size 452694 diff --git a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png index dfc0d20288b3a2e083866d7a0b793d4b9da3fb01..63c0232661a64ea6b28d64f9ffff09e2efc5f957 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a89232be587c75f6e5b1334a6b1bf8d036d1ee282627f3a9b83e2698a4bc7f6 -size 543493 +oid sha256:318463e5ba38ac4610fdffd83d87a6d8227750550fbc2ab768c72ec224645519 +size 520759 diff --git a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png index a04c85787ad850cc69504bb491005fbd46027b92..134487b0e94d2d4a1b34c4f19f9a172dedf52b46 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ea4dad2b4c0bfb169640d113401c22578c2056a71e65376645296e240925f71 -size 689798 +oid sha256:0becc07b14da231d31bb91f016faaebf0ecf5934d757efdaa934d217ce96db4d +size 988976 diff --git 
a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png index 5ec5dd3f92151ba30edb5fe4b8c9c22b1d05185f..8b8fa32d104573df00a86fdcb9c0b7af08064c2d 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b857328b648c546491d0e5a948ff489998e99c02a99aa950e2d2d373ff2f87c6 -size 495522 +oid sha256:9bf79cf2a285c547edcceaacbda57ca2e02c7267fc4c2e85a7b9dd40dd32e30f +size 479863 diff --git a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png index f68078e28f33de8fc1317ec4a74df8608d296ceb..fe83342a80f61c4a2d66aaf625c049969080b5ac 100644 --- a/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png +++ b/images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fe4a320d8d84fb842e46b07f5d05532d6dae1225ed16958ee8d82cfe02d7226 -size 1393329 +oid sha256:69e5a02bfa6886e7f9dc058d9742b6c9d0b5fea2d4f41e1310d818283ce33e26 +size 1867561 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png b/images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png index 43b5e4216ca6b58b06d229e6c203903f4836c3cd..7ec936a1e165f693dffc12aebab80743efc20b4f 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c8e1a212c4ac706323f24785c67e844fda841023636bb7cea3ebd60ef85e3e5 -size 1381207 +oid sha256:d43c26a9faed3bf0db7c7609e484f0df68382bf56af075b3f90578b1279353ec +size 3140812 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png b/images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png index 1607d56d4d49b965a262cd3e8c487492db60686c..d405e4a9163754199e45292925c6f21f9141aef0 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31b3c0aa58290f3f0e7b8b52ae5b1cd9c45816257eeee1ec630bd2e692e26059 -size 1224367 +oid sha256:5fd3a2377d33e97a43e10c2e3a9490fe1ba18d87bfe0a28b3d89bad52079686e +size 1048826 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png b/images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png index bdb8c67098d1a12337ea873eef371bf9d64d98d9..175e45a6815f3659d8955f26b6d41809c3a7e236 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bd996ab4a192802426d0b4174f63ceb6a146b1d39078869eefe89b6eae3ddd7 -size 1195773 +oid sha256:d62657bd76bd688b4e849d146843872bfacc23770b68a8de32a1c6c833d9153e +size 2177571 diff --git 
a/images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png b/images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png index b6320479dca5694f26e15c75f0862b2123ad2017..8eb5fef63521a35e86d8c1e4e20f29acaacfa3b4 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47390e595b41b5a68204b13e2dec7f0d01407f61df76fbf7de8e241fd3563d44 -size 1139582 +oid sha256:d6518747bc5457367d625f35839ef2b971224c04751cff46268d70c195c6201b +size 767388 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png b/images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png index 0de5f8c2f081b3663e605c9393df0f9c263c9eea..a571b24e539d4a25dab43dfe930c0f20bbce70ad 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8d363d212e56b5c28a301f0624bb4109d34d41d7746809e1aa08311de9bd9af -size 1567266 +oid sha256:05297e38b652dae41e211a2ca46a70e8ed5461f937fbc12386e737b2ffb54dfb +size 2740249 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png b/images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png index 545eb240e220ff5639abefbf37d6ba39b38e48ff..48ec4ac3f35a05bb2e53d95398707ad63134cba8 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e47f2812423185ef84f940ad70e98422014d390c949a2ce18cf57bfdd6d928b7 -size 1377628 +oid sha256:1c9ff445d42029c41068cfc00553708b5ef60786a66aecd6af9cecc677e06fbc +size 2923920 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png b/images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png index f65e3eb66f92a0d563ffbebc92d21cee655ec717..3f61e1b13b2f373685abc861c58d5d75666712f1 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c2acd130c4c226624ad322d8cd05718068fc4c2322a58e3217e8ea020469884 -size 1378168 +oid sha256:94fd2ae2b7a995ba988fc96cdaef1049ec4cc26ee265884ea44f3b4708346033 +size 2446497 diff --git a/images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png b/images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png index 194aa25e7483252ef8ba90b64b45f0125ec65d14..e29dddd76b557a4f786360625edfa3855d22b412 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b93b48a6e77517c28bb868c8711b9c1fb104ce871055088772e2db393808060 -size 1620156 +oid sha256:4a285a8702cebe72ac8b5ec232c1a173f3b91f61d1b3faa27503db046341c2f2 +size 1782845 diff --git 
a/images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png b/images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png index 62e30a4e13f04bc27e4b63b7601fe1ca3cd3b06e..7ab20ab26234c5e84f14876fe51bbff34f0a7a6e 100644 --- a/images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png +++ b/images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00a69e0f67bedebbf41cd8389ff707b063ec7b9f4033f165c2eb38a6e64e344e -size 1195446 +oid sha256:c686b37f46657b5f1ce5a7f9e10c40fa959167db8748964b221bca6fd4503a64 +size 2740569 diff --git a/images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png b/images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png index 330f902f6096779130b86761cc524a0ed73f7129..8890c030b859cf54ef427d25daddad44c4f1ce9c 100644 --- a/images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png +++ b/images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e85e7f2f0ffcf6afb7840ee7aedc1fe9a21ae039b7633d87c346b6baa3486ce9 -size 789906 +oid sha256:b5f8650761d67e18fa8e8063d2f257f61361406ec14db07fd696bb9e66c03228 +size 735724 diff --git a/images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png b/images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png index 14503a7b81cef41a25888bbcd2d2f71a9fdfc074..729c1c5d5de9ca9a89e33309517dc8a4ab51d893 100644 --- a/images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png +++ b/images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06172e529dee02c73e984fd3984201dfdf94846ef17433518ce8f2d433bc73bf -size 1992640 +oid sha256:064a775ac42594d296881c65506ddbd93a55e7d9cad1f9440c531c3dc62a7a03 +size 1208619 diff --git a/images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png b/images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png index 6ed6f39c529fa9dbe2a743a5b1b7628fa532fef9..9fc74dc91bd8155cbd3706bd9c90ce77abe5dfa0 100644 --- a/images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png +++ b/images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f7170fb9fce1d06dbc5b72b337703f61631c28909ea31e0e129ae85c5a2f064 -size 2319935 +oid sha256:80481e95aca011d5e166c84003b6e778d4a1c79cd929337b98c978c1e0b6ffa4 +size 1959197 diff --git a/images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png b/images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png index 247cf5a2bd2b1f15c6f5c76c597ecc127a01e7f6..8b2f680a6d9a42e14b12cdcb54f5f3caf6dab4c1 100644 --- a/images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png +++ b/images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7ba276a20dfc52b7240694a65ce1ef63f7849048a21ce2a704daedb30c8dd79 -size 2287472 +oid sha256:962a70b50f7250c0363861cb6275a123f7fde4c23326aa2eaca15b285781ccc1 +size 2905468 diff --git 
a/images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png b/images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png index 4f1fc56dbfc8fe87ffc4b41c8128489dbfc1e099..8744f3f10f84e992ab11e59c605eab03e9e0dfd6 100644 --- a/images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png +++ b/images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb38600ef2e7979c3ea8a7468ed754eebc3b1f6f42c5d7b3df1c34a95263c9d8 -size 1109428 +oid sha256:e32d34edaa65180b641d78852876f5755de290c9e8144d2e537acdb90536d378 +size 832166 diff --git a/images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png b/images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png index fb1eaa3a92ee3aee1a351f1ed89ddd525e5fa39c..a007c5bcc6d096ebc4ab9fcd8cdccbdf4c0f39f8 100644 --- a/images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png +++ b/images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d7ea80b50dba113e6332892bb75b424328cce6e7fd6af7acd664dbadfe0a5d9 -size 857520 +oid sha256:451ffcbcb285a54f0d24cb120599255deec6eb4687ec882400b83a034d7142e6 +size 411115 diff --git a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png index f7a8a5e455aef1b3e1592c6c56a51503dda06dee..305d766a1d41dcf0dd7c66957b9a97c9c7129135 100644 --- a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png +++ b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e953f3f3de2795dcd2c49cd0697008d1bb6bcc3c5d52f3a87e04912d2c810008 -size 396495 +oid sha256:b2e76df61b4736fa52c2d3d18dac9695a9104934f5839e65feb4ba46c7054502 +size 321339 diff --git a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png index 8487e6a593ea5dedbddd28cd17795b72d35bec0a..428c747efb260eac83ec786e1af9dfbd5b0b3804 100644 --- a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png +++ b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c60792a7cfd0d8fd53482992183be454d4af861223d996f2677786b092988e8 -size 554345 +oid sha256:93ca7daaca0eb3de8164b592e27f8713c4f67f14079664be77681ada4bdfb7af +size 659779 diff --git a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png index 8157175afb74f26dce027d065bfd2ce520baec9b..e1f971693ebd741590fc827dc46d7763105e504c 100644 --- a/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png +++ b/images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47e7a9282a242d00313e401324d92bec1520d20a7d6b41e9c060eb70f644fc59 -size 162241 +oid sha256:0485f2bcf6aefda7aea8204679c9515d3e633aeac2c4eb863fe62c0dd50eb518 +size 163668 diff --git 
a/images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png index e808cc6a8240b067066e51a77f726169d9c55f36..bbf2da37b844bcee3f56b29f47428eb9f4522de0 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53dd420b8ad4d4502938212f724cb1800a0c5b431b291ec07c8f3c862936d578 -size 619764 +oid sha256:8210245667cf0108c40ae86d2495ba61290eef2dfdf4fc82d3b9daf188075c18 +size 558280 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png index 1d0cdff72f17a25320fe2fa075d3a1ac97a8fbee..32627c144659f41191924a367de0fbd9b922f0aa 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb633b8d9ecc760b3acc2f4727a2ce8c902043b4427dd5f0230f486badb82014 -size 435234 +oid sha256:a62d3dae5e2b3577da2dc40e84b83805adac3bcc0300e157d099e40f2ab8d2cb +size 289618 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png index 79732e4c07986b0f85fed799b79a28e0aa18f2c5..38459afadd60a329a0c190577c33ed4eaf42a3cc 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1eb6d4a1bf6bd769c8ef75c20482f1dc14acd80aa2e10f0916119edbbc9e2f4e -size 582497 +oid sha256:b646aa81018ebbcca5c876424d9c12ea9dcb4a0f14599d0772d65289cb532704 +size 576794 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png index e18a25cb011d84dd75fedf97222e309b0b4f627e..8ebc02b8d7946c36acf820a66df1e8c0679ca3da 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86a5b8d2beb9fd1026c699e819b1e5f357a28d403fd096c1b81fdea59f4ce0e1 -size 1273698 +oid sha256:bfa7885a4c6af2eadd9ed89475ff34320c99cc6af3cf7ab0c1c1cd78c0341586 +size 1714913 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png index 7a81757f3099c72753303af182447a4139cfb9ab..f933ad5d2ba088c3a0b69b13f96d33195c6e8d0b 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b56b43cd263a347ac23294f5321b21f0d53d4297b469661a99dc405f2555b8e0 -size 776108 +oid sha256:21c14c2d9495a26e3ef222304e6b853e2df5f26c0caa14db037c618710815ef8 +size 875463 diff --git 
a/images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png index 8e092e78fbe8413e102f2d8deedc59fcd200592b..09dfc116a830f7e8ecb89f84dd8ba79d383beac1 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30e60272c547bd82776867a6ded63817f5a7e6edb2557417f6252b9a8e25cb57 -size 333108 +oid sha256:a7a15422cceff051b523ba08368cf58bdf351ecccb6bf44021a91988ffcb5550 +size 478522 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png index 8f521cf88fe95ae01d0182fa5ab99b4faa0f727d..ba08ad71e8831fbf3dc609af2ada81a726ad2822 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df334a98aaf072a2def5213cafd85ef4129f2143b3201a46b32ed0a1675045c5 -size 782419 +oid sha256:fe30f0714845b4d7e88746cedd1f8f491df19057daf572fb2b33e8aed6bd803c +size 706870 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png index 1d4011e06152af205ebf20559d008cd2bba54ccb..9bdff51d39308b287c35a32e9d3b37969d91b33b 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58ee5a02301b1c7ceff8ec2277d5ec2760816ef116a5b9a5ca3d24948f0cb11a -size 776976 +oid sha256:f36bfa608d044d9208141ce77b94d8307f76c83413805b8d3d4effeea8b6f9e6 +size 707718 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png index cd43272b2fb9728016c86e3a426c4b5253811392..d34b29785e5be58c5b4815dee38cecda3ff74726 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2bf44e027a7114e841f3a6965d0ae352d003639ae5c542728aca96077afa1fb9 -size 331710 +oid sha256:bd7d128c003397900ca164e4a474b81a7083ed05e2b3995e6dd97ac65a6e7c5e +size 243071 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png index 1a63a36d0dcdc01707bbd54703d9e3e1265ca182..df3c17fda4cb2ad10dce71f4d7ec4212a41b88f9 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4741e6315f5aa417c54bc43abe62542fa431cb2536821423cc41bf4174334855 -size 1273572 +oid sha256:636d378319760d505f6ed3a42a834da65c4790b6a871290e3baae9e22f06c345 +size 1635418 diff --git 
a/images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png index 6c6ccc8825ee71099e72035abed7a60fb8cf63cd..7795ff5e8e69b0a7f2b76079517f7f06cde2c3aa 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ccc5879e8187fe134f050720b4200e768d2f57f5cd4ebc60ad6aa9889d607d0 -size 1273605 +oid sha256:ede7f7156e7a101bbe2d63ad2ba01715c431b88bc0a4deba7d0af92e400ed0ca +size 1158248 diff --git a/images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png b/images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png index 916fb5e9bc61d31747b2d65969a3fc923c5e0725..bb0159eb17037853caeaafcbf7b3a592849cde9a 100644 --- a/images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png +++ b/images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:718005433065e803d1edcda51970d6a468b3568a6fe38329cdea2d9583006767 -size 634346 +oid sha256:3e820b2af3e3872db8a333a56159f010628b47c5d05a92dc6b65c46782f81787 +size 761170 diff --git a/images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png b/images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png index de834991ce38b458ee0e0df93399da59ff7982f6..fe1bfb1ef72b600dafcbc0771a96b1aa6d80ae79 100644 --- a/images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png +++ b/images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b97ba0c3280b017351834bca74623b792412c43a6acf859bf305b033ff20bbe6 -size 2578302 +oid sha256:2fdf84715edd4ba1e98e28c18a4cdac6a19fcf849be6574f040cf8439f32c591 +size 2226207 diff --git a/images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png b/images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png index d1cb50a18e6247a5680c1d63b5f466f885a28f02..8a4d2b7a7ae0397d7bf33a02c92d9f76e27cd8fa 100644 --- a/images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png +++ b/images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb51cbe0268f51a5c64d768d7edbf2318588b0666ebcca221788313e3f37fa7f -size 548171 +oid sha256:3647c302146227c1e42204aa9352f9f6de96fefbffd9dcceed7215cb52bf265b +size 704018 diff --git a/images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png b/images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png index 4d664066a1137e638b8997a70f5e0f5dd6449fd1..98a61a33c0a771099c5f9291a2b6d52a73b676a6 100644 --- a/images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png +++ b/images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcaedd202499f9692f80b440fb7199c508a52a7e455b6eae2d84eb850cd7027a -size 592236 +oid sha256:e5b1407447e1d2e247d5036f7f5dd2811667e24ffd5959e323ab0049247ddd7b +size 712345 diff --git 
a/images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png b/images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png index 2e65a1f275c5faa29d219ea324a8e12ef6d596b5..0ea95dfb6d992ab01ee20ffe15afffc31d2c52f4 100644 --- a/images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png +++ b/images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b563304e55a717fec4305b921eea9cd8479eea3da1a3a231f85512748a271d5 -size 2013673 +oid sha256:430f89657505500aab4b9eae85fb6045c5b79db5b6a57370557c041710fa0c4e +size 2014015 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png index e5cbbb8b196242d8102bebda23f2b1331b7c7eed..b7675c8697c9c4ef083c6ac8760e62d56de21ac3 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85444335f49f779cf6d9e25d3d7a29f15f51f16b44607e4a1ee7832f5136de6f -size 818489 +oid sha256:5468d23e222f8fbaf78828c2c7f859a78b85d254590a503c66184fa2d5873b61 +size 1050905 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png index 0e05f375ee44f38e25616bc8e429f70667ec307a..48108b3abc16954ee23d245f13f82b0b6bd2e456 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e3b4c0558b5c07e8e134d2edcbc5c9c708030659b2999cb1d8529d60cc66c4b -size 1538645 +oid sha256:36ee793cdf44bf0dfe606b2ce211d74e2c39484404ef7868fa1a590c8ecdba9c +size 1117498 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png index fec7d2a2c35b899aad512919e420a45898e846a3..43dc324c2c8df6307bbaa45f38bd5c539ac93bf2 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32d748d8b39ced8b1831c31999a7e478099c23c4828e4961481316f5a0619eaa -size 966229 +oid sha256:538792e70436f3b848c49ed77a57e0a4d02e6537221e5de5da764a9fc5752d93 +size 666358 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png index 3acb5216210ab96fe665ab62985b4860aac26984..d8e657ad77215a8ef794e5282e7b17d721048c5a 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e21286981d18d9655e642e45f0acc62c04fb31e522aac4590ed0b8949c3aeb3e -size 764580 +oid sha256:90c45e23730b1ba6d622603b4414041d6d80973dbc8b19c3975bb079a1ea86f7 +size 705877 diff --git 
a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png index 751a2011151478b1584144cf70d3d81d5c2af396..f060b24165063e2cd7a5e77eaaf9a38edebfb90c 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4a2be0b01418a8b0842281a11b0dc2a8250cf18b5fb6f1e7165e8218ae6cc05 -size 936272 +oid sha256:75db877d4037123adfbd44fa8e7aa561f68139cc90d82955ba4e4ff6a59ae043 +size 840806 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png index 475ab592f283f4968b64427882c2e747359bccfb..7dc19c0590a3ec6db23556374d24898d1b7b35f7 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:036a080087cb9ff8ab3155faa977978fc8069267989ad9d48322f24ca0dcda37 -size 815220 +oid sha256:9752a5b8e0d18c09fc8cc6c2a2fb2ae752f7f876d4960b7d637be204d360008d +size 769029 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png index 3d8ad23d377b86e6c9f1e680ccf0e2f038b1aa3c..c716e28f4241ca6b65be790a4fd3fd1fe95fde48 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8412d714c8ba4b600119de592e73a83c82a895ab78f79021f509dfebbad641dd -size 789062 +oid sha256:86dfdfd47ca16cb992fdcbcc5d649a901bcae5f3acf55e94a73275b25ac3f2d2 +size 758515 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png index 072590cc7e5c172b5b9641b0ccffd07c773cdb66..e61f857a9c9e97eda2b4c79ad5cf8429f9560d42 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08220b5a6344478dc243e5ab4fc11228a358a0ef18c744a884901c7ce5a434ff -size 816737 +oid sha256:cebcba09a0ab04adfd5d73d1314cbbcfaa0fea651a481f2e35605f2f2d7f991b +size 629903 diff --git a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png index f5c15d4c2731f47432ec99367cefdec4cc9ea6de..c538b99a293df4cbaee3432f0a21ba8da31fed19 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd2cbf7eac86926dc6ea85d38d8b05ba68ceb636c80adafb51ee86fc9dc7d1ee -size 948826 +oid sha256:6bf6bb956872b918b6241f1516a5f4cc5f9b16813d3ea95c4837ee31a7087566 +size 1055903 diff --git 
a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png index 6b3578922ab94a124a53dae04c3bebc1922ec324..2a6e87d704bcd1274a5fa00163ebe281b83c5fd8 100644 --- a/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png +++ b/images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e34cee8d05516177c2e12bed331d23a4dfe5d539e944ada6da75646510f2538 -size 765404 +oid sha256:f70e58890c64bdda40801200f2756c15c590b5081d773ea8cdc111068bf05c94 +size 866006 diff --git a/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png b/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png index 29b2c06fcb0754c281d4cf04318906b31490879b..47764281b70de2643269a9c3fb59a97087787a0b 100644 --- a/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png +++ b/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a7870ee146d1c2967b74ef468288a7950b1ee6964136e5aa1f219e2b28e962b -size 612882 +oid sha256:e16d2dec15b9f36e009d75f0e313e9702b8d87659eb05d4962e2e8b1111f15e4 +size 662082 diff --git a/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png b/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png index 295b7d97263bf60f337be338b91aece2fea6c24d..ffe8429774391f728d5e89304fc9ea7f04ee6100 100644 --- a/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png +++ b/images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1f05f7ae2f9b3ef348727de090fc7fb69df6f924509bad4abeb51c376530fbb -size 1496586 +oid sha256:faed0435163fe9b9b68ec398bcdf6fae97e29b8a2136d9b4925ceb6ccd92d839 +size 644134 diff --git a/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png b/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png index 7fe3493798b581a4b470aa82fb033fa2df593fb0..3e6312afbe5a698405c73ef7fdbd1dc2c4d2eb5f 100644 --- a/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png +++ b/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0d70cf8d8371da0ab0e285990ad21fbd76843fcefe3d72cff65da5c790d295b -size 1188129 +oid sha256:719901bde115d4f4a696d853c2c4604b0af9d52f4451537b223c430badc8b8eb +size 1793009 diff --git a/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png b/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png index 5c9080d1baa5a949c5814d34b0ed111085ec40f2..6a32d08ee86b6e25feb463972a6fa9277babb380 100644 --- a/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png +++ b/images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a61e9762f1365c350e70009b776e3e58016b36a2173c9aed1ecd7d40de7fe94a -size 559277 +oid sha256:a4a1a567b5c4d65f9dee64c2a85317693f1405e80ccc2aa4e557d97030bead98 +size 912802 diff --git 
a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png index 3500f5a55fdf522d0fe5abdb0d39a5abb26a3cd3..8711b0ae2e5e9913af2c92decbfb14a7486657b5 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de0d299d8ffca888126edfd9d2e7edfb1c3b667818c2f6d2fa3ae9016915228d -size 1695316 +oid sha256:2929e01b139a7b3f292d755349a7171370369aca48bb43b20dbe9eed5d10dd61 +size 864375 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png index f87c36fb93873e51a0551aa258f6e7aa20b421be..e3c570699e20d6167872a9d1f3e78015f826f761 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f3f84c34fcd0def18f4ce5a31b496264eaa8b0bbf6e7288d46375c19d86bfec -size 2323696 +oid sha256:2eccc6ab67ccb3b9a3a4d4f57203021f7b571307011894249eefea7a877b41d9 +size 1527215 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png index c87a2a10e2a00a1386258c200a459abd812846db..d6b46684fc7d807b3d939485caa2c4cf9d63d229 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8b641f1bf245344529063e3619d3c483cc02cc92a4a675a111b31ce76c651d0 -size 2459025 +oid sha256:2b642f73263d24051c5863cbda3f62aec458301a4716e979d5ac8decba36335b +size 1464889 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png index f0afd55c4c9b107b76c44b65e62723b65c14870e..fecc0802476a5634aaffe5977385d164201e8b60 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16cdff8964819093b410be4833015ab4e1d7c2094abf964f85de0ff5abeeb274 -size 1159685 +oid sha256:95362936ba076bd3b13903f2c1d86372fb8b4d201044dcbaac55ee5bb6ef6980 +size 525933 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png index 265cfda7a7c1b3dafaf9cbe34d47e39ff7eb8d41..f349ea72dda38d079c973a6a23115e72f9367166 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffc285e30c98ba164b3fcac850c457eb79380481764e26be72cbbcd3b33784b1 -size 1153348 +oid sha256:1c8a397f2fb3c69f59c58665930b34ad585e02682ae8d7173ed3d2bb2673a8f7 +size 1661558 diff --git 
a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png index 81e1d365f9b8d1e524088e2941b229044091e038..32847ba215926d386eac528a79ebed0a21700ac0 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:307c3c9668f7bbab54baa876bb83436e8969f9dd6757256b3af37bd1e57916cf -size 1697766 +oid sha256:6c8b4a16582f905694f0a0e5b177102938216adbf9edec253e0ca73db32c23c1 +size 650126 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png index 221075f04b48011b1a0d6635763111a4de157670..5fc214f28f152d0ed1bc266ec8a16c432a9f7a22 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08d01699780508a1b07a7d00cc4e6b11ef36c156d15c5a5f34f3fd19888dcad9 -size 2238479 +oid sha256:17d344f9083e241c9193a2ae4db2d4d8be4aa284877fefaf59ba6b4fd5c3e007 +size 2104178 diff --git a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png index 62ca5bd755f2570722fc4d3f8db43b4eead6e830..c13b3dbfe62885eb6a1678b01b025fd29b9f41c1 100644 --- a/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png +++ b/images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2f33fd8d307504a4e69f5315e8f78649a65521730a5eea3c10b50549d287017 -size 1697508 +oid sha256:b804007f20670ba3eb674dce1f9b10a3e5e6b8ec614d169bc1a558a9d32389d8 +size 2018241 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png index 322526ae4d30a8eedd1bed707f2148a887fbedfd..94a5abf8ca3fbbfbc3b4348d54b109cc71d80a21 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbd596759a51241e85d66e99b9258546a1bf17716651f43c3d4f960ad5077cfb -size 687687 +oid sha256:ded15bba4b70159a0f9b6193c0b55f98a87682233d8e165d07d64255cd5d443a +size 742739 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png index 2b7b6d295498b6af7f0eae4b30bd711867842153..cb72214abe6c116a7c0934b91d45aa2d867316c5 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b0c1dcf1063e0e7c3987c07b0403e1799b434f8778a7532e53ab33f0a090cfc -size 911611 +oid sha256:c06ca8c8ca0f779891780b9e3ff323d589232ced0f88d65bae675541e3e7289b +size 923464 diff --git 
a/images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png index 1c29e56d913f6fa423547210baa1ddb467068d2c..d3e6ae10a3d6adf680abe35e3b91fe334d58a7eb 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1721f10b6aea8080b3bc8273e1187e6530ae9798f75d582bdaa527588fef226 -size 702781 +oid sha256:425ce8a5b5b4f2b83de08f6445264be35034d4faadfa7cb2557a63340dc37ead +size 345716 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png index b1e579be0e481817b2a550b29803eafbc481045f..5d28bbc1746748183e3df0c882d1ff8ed8498a71 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ad56d0edf7e289827f375f501fd2e16c79a711368e0a8f883e6a03670737d10 -size 536625 +oid sha256:b8500f2301f9b8caf74d003ec02b24b67953c81c959b179b1f74d8decf88a5b8 +size 239940 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png index 30f6dc2038e152eb0481e3e75e8741ea05d69454..4a357b4a0fdcd97a8e6920f534c936a8b971c9bd 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d443bebee0894e32e0f4119b76210ab23dcc2c4e63a5d7af60d739153d946d4a -size 686034 +oid sha256:41d76f343c9bfbd3ca7c29ca24feab51f35eadd7835d06e7dedc3830a09c0096 +size 676854 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png index a6eca54056cc8c6e377462aa3d3d32f7ac555ed5..168b2f52a02a84670945fc9519f5ac211a5f943b 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6f2e61a6dbe57994d0b323ff019fd6331effd6cf0d5121a9ea1b0a6bd76669d -size 650013 +oid sha256:262a7ec160ecc00063a919d8bce87a1d090a6210409c32e35bf276478be7e41c +size 758005 diff --git a/images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png index 354a553509eca4c62bb0f3b69b6f6ed4831b0c31..08c88e54fcb3aaf3aa1fd08c2a142b1d900dec48 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3c7890d9320a64b47536d124891d08540943eb8d93bb36c29303a3ddb8e5a95 -size 1632342 +oid sha256:9c44f54a3463f131c159cbb3471df60ebcd91b622c78158c5b15c63761792aad +size 2066751 diff --git 
a/images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png b/images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png index 439e261b47f64b0b19f46c849ed8d35791b953d5..e46e98aa1cac3551b7635a31f7ded97f6727f9ae 100644 --- a/images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png +++ b/images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83e5d5d595f4b384bd0d28841fd96f70cdf850c8055e3efbcf421110b65dc5ec -size 679213 +oid sha256:48a480677cc1fc5a8b28edad41734a638f16b9268da7f98f85ab3023e593a5ac +size 751406 diff --git a/images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png b/images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png index 737c9896afd59477996cb4f59633e11c759cb8b8..640a403358f56fb02a8f014708d5ddab32831243 100644 --- a/images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png +++ b/images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0861a258690d8b87609c0d9e75102c8a349eeb421f9c98540deafaead6334e4d -size 2328298 +oid sha256:8a083bcb2a3d7f8e0bd7367eb8fc15f9a6b26febba26da674bd2a3e20b1a5888 +size 1245177 diff --git a/images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png b/images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png index 4917962a7dbb0dcf621426d56b8435607e63560b..d05b0d830abfac3997b02c669c8de49aecd7ea89 100644 --- a/images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png +++ b/images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96bb35eb3c2d415ab8023bd7cb35aa0187ed9cc669922c347de14252dc145903 -size 2491207 +oid sha256:4d82a0b1275179edb2bd5379f22067467b2ed4591f507412fb467b9b37413175 +size 2498689 diff --git a/images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png b/images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png index 84d1d585a107532522d95c8bd4fa4c8c8dc265a7..d9306b6b33689a663a22ce7c69bfd3850862ac8c 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef676b36f2b4ec62f1b4e38c4ce86aa03f37848a0e6b52f5944d7e645c520eb8 -size 1373128 +oid sha256:687ec9084d0008e73e31bd4081748272b9816ab33065622bd3927ce41b02a42f +size 1993414 diff --git a/images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png b/images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png index cdac0f835f256aaec70754defc01c69f8efde84c..2a4612c3a772843be9dc0a7f4acac73e8120b017 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd7a9e6f5356d9b2fcfe49f5a68926a873a3055eea028d291facdfa2dffb9ddc -size 1278141 +oid sha256:3e134d37d947098fd072ff94845182992bf6caaa6fe9baaf700cdd2c22c912ea +size 1097331 diff --git 
a/images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png b/images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png index 17755c0222fbbae0339896b050a7453a8f6768b6..c5fe6f299b358e16c079ad1f01fef1c3f8b66281 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b08bdf7b057e6aae52cf21d1304004f7f64721f716066bbf8861ed7bdcfac1e -size 906297 +oid sha256:c503d3da0ad5804830572888e245b2bc5b8cd64a4075c19f7787877097a7fad0 +size 874371 diff --git a/images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png b/images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png index 63d8ba0d90096e6384de52eefb14b4bed72e8801..31b5c509879a74d1a5c71b5407ed4126a4a7b99d 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b95b39f51e0f2ddd72f7c92556a025163fc728c49f1601aeaca733e38fa7e31 -size 513671 +oid sha256:157e28ccfade3bfe59bbdb0ecea9b99340a836559e059ba118a68b722572dabf +size 441923 diff --git a/images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png b/images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png index 5864849b105f3f490d20d526e5c79701eb0ecce7..947e90d6fd532bb6fcca6ae418f742e8899c0beb 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7337c42737cccc7f0feb72e54fee666315a1e411aa1210defbe0fa2cdd534a70 -size 526096 +oid sha256:55dc677a89583cb9c59d5d7de6c2c4b366d335f38babc0d63e72f4e3c70c83fc +size 430496 diff --git a/images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png b/images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png index 6129f3f3bb8af1afedbb4240cf2ab2bdd3f02838..1be456f3241533f2164e643697ac1d5696c5934d 100644 --- a/images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png +++ b/images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8761b4c13731f11b10500140892fb518993d06ad363d349d931b26695b38d207 -size 978796 +oid sha256:0fe9505dc89d6afaf5a75ca12fcf2753e33bdbf09d320c8a10d5c829adfa020b +size 825995 diff --git a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png index b926c618d8a53fddc3ecb28ae04b0615e2de5295..add98bbb61b9e1f268a3698adf08c9f43bcc8ca1 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:604e1b4f64c76d9c92c94fc58ba4a3da0b7c7a15349c1ff71781ecc66fd6931c -size 551240 +oid sha256:e372c0f263265c9bdd81882c17a3ec21f95aa4f357492633aeb4fa97a47ad247 +size 502736 diff --git 
a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png index 42bcf47a48fbcf15b4ad62d7c5ea70b4ca084319..1cf1ebc2a738fc5dc56f5cd05354cb0deec3f2c4 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e82e8fa8d6ebd7944fd793e659e86dcc715adba14a2a9df262204cc5c2e3cf7 -size 645708 +oid sha256:2f4e45db1bdd4f76ed0cd7c544b48978177a76de0ad0435e011f91495b80b5d4 +size 552141 diff --git a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png index 36d205c6eba9d77dbab19e4be2481b02e76d179e..ec33e190388dc58d8a68a6fe269c7e0b31d697ae 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e882c8e2cd9ca886080ce7663aa520191e79c2bbc5fa3fe9ebcdd0a336c7504 -size 476255 +oid sha256:1159b4d6de96bc0ef48fdc77b782757ff1d4d89b68160ccd97f5afb71880f884 +size 560964 diff --git a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png index 68b3ce95a8ee9f903e5e44b1123ef325f8980bb6..c8f1bc486775ceb627159c7998e24b588cf56499 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17af2458ca6b77d85b14b9e25a0295b728d34fba495a1525464e79899675c0be -size 734581 +oid sha256:0c4085806405cd4a95c8bc698c0545d05168f3b77427a73f45a9dc205f0284f1 +size 876641 diff --git a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png index a77372ab94b23dae84f6e93f4a4210b8f156f800..8d7e6285657e705d2adf5afa9095418fbd97a24b 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c5f7f5e8be24cc39c6a65e7b2d85e9fa881a069a74c2999222651140a7df58c -size 879939 +oid sha256:2359e4890562aef885f050830ef8670bafbc2d99b94de8c189fbab1e846a40b1 +size 869870 diff --git a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png index 817d79a03ceb6a9a71d9446c6d245cbdc3bb9cf3..a8e418393439f5e231160416afbbdb5c69bbf542 100644 --- a/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png +++ b/images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bab758c2f79171f81a2a65829a3060e1b3e64ea438472ae9b561e3eddcb70e6 -size 884493 +oid sha256:1f4787bd286a5669f0055a64dc70d115810109d4e992f66b380b97f5f9b42dcd +size 763093 diff --git 
a/images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png b/images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png index 6f4d36081ece4dc1c8c2254b136e6e2ddc486604..0ae6ad1c59ad0526b327b2751fd5c26c8ebe472e 100644 --- a/images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png +++ b/images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3958b2374933ed288fa542251c25f3cfbf6704e07d6ed1db0a837acece6f52d6 -size 3374766 +oid sha256:d891b3aa43cd8942dbd7369e9915ee0bd0b21a309d7fb820aef1505ec46328b2 +size 2497410 diff --git a/images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png b/images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png index fe822e855be07665a54cc2d554e41bbeea524c05..6195c5b1d09fc22dea01d842a8cbb8a16dbb0cc8 100644 --- a/images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png +++ b/images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0453adcea477558ee6831efd7bf15218f90021be2d271bac1697ae3f67a7c3ca -size 3406942 +oid sha256:ff38b92861bfacb9c400f244b304d34a5bb09021d00c9bad2504d30902a6bbf9 +size 2601473 diff --git a/images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png b/images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png index 9e52ad7a5a73e601c18e4e98cca0c7346886074a..1ca0dfeb1b9f9ce96e1e0b75bfcaebb53bf3394d 100644 --- a/images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png +++ b/images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a7d65d4ea1fa7c8b01e2d7932e7cb7cebf29ff02e33b28c3033ff7a72f2948c -size 2643519 +oid sha256:798516db64efc89866e98f7506e9dec97d8cfb0d72faafcd2b9b0b92892f3d30 +size 1729117 diff --git a/images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png b/images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png index 0e712f67d3038e4b6c6e7042f14e449764f82887..c2e4756eb3fa472174ab129d76c9cba90bceb20e 100644 --- a/images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png +++ b/images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e704ea87f8162f7a13ade8218af970a9eb58cbcf547ade4bccd321318bf6de8a -size 3911831 +oid sha256:666e048eef5dfd908359c3bdf23ee093916a79083263b08152b1d953e6a73109 +size 2299293 diff --git a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png index 503501ebb1840f3b341cd2c237d7d7c052cfa404..2eab93a206a0d76efe190ef9391a0a49e9178455 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c81f793d3559b2c1787a7063bc35b0f6f4e0006bc275159a4e2630fac7b7c567 -size 1133433 +oid sha256:398edf759e309198717a0e9332f00866dcb23f92c67777491ece8cd2b1c24b25 +size 989726 diff --git 
a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png index ccfee2a2a36a76c7b73c41d4c7aede6d3e76190f..0ad26f4967e43400d04237ce039900104b97d2a4 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b9fc949939b077a1210c83111128b3e498db51657c3389cbf86d86f42d4e681 -size 1207641 +oid sha256:465c174397c9d3529b1c8b23a191ec16b5d3e005facd4eff36e46344401d43f5 +size 1149801 diff --git a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png index 1e4fff9cdb88d8fd530e642bfccc2ef869509339..e1b88f704ce430764b9bcc4e2408072baf77943f 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2436271f505f7655f5f96bfeab6e76a9c26e2eda1f3a96160812ea255da3cd74 -size 1171461 +oid sha256:5849ab787848a456b5cbb026059e1594b889674f4a6b2648658d28ad2591c1d0 +size 782870 diff --git a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png index 73538ca1650fb48d520456d04c0f00f9383ad53b..5bf8a1cf15cd60e2e9f9f286e3fc264e1cb980aa 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:352f7dc224bdc8f4bbacec2d093318439054a8c802d0d0f685c979fb37c17b70 -size 1584095 +oid sha256:c159d3af86a556ed8b7ed28640f799d1b073c344ff6a68722f8ac6099df3d0bb +size 917388 diff --git a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png index ba10acd036c1d90e697864fb47b09222bab03de4..d7627f1d601132d26bea951ae4a697d483e49fc3 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4398a98e61647ede1bdfc5b285f13f3dd3e306872a988062936d87e34de1d68 -size 3731315 +oid sha256:fc0f090d9db5331d6d368e8510deea9fca1f93843fc96de1e3137f82f39c7ab8 +size 1585368 diff --git a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png index 4ab886ed65d0ca69cc8038d2544a60b49d95e8f1..02bdaa2b64f22035d49e54f940a039d2689c724c 100644 --- a/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png +++ b/images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:688a33129ceb68255292434c289267a156efeb3f587f1c75afc5e6048c69652e -size 2443158 +oid sha256:48648dd806836b7fb12a2299b2933ab79495a7f128d19efbfc3eea342603ae09 +size 1345515 diff --git 
a/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png b/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png index 43b8eaed605db199da1361a4224560591312711f..f304ffc58c27ddfd54ff9891dc0b590525b0b06a 100644 --- a/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png +++ b/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a56ae87dba0987348600ecccf6a0c43c26ea9397f093d6fdf27af613e458d16 -size 976718 +oid sha256:59a635609e22adc5104c5e00f849992a4070fdee9440b9574a130ce94e720418 +size 676821 diff --git a/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png b/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png index 38f20036ef2974142a3c866d136b024aecf97b7f..8e572c709ad8e0f36f0510a4023b2a97df93b47b 100644 --- a/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png +++ b/images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4260825a372f22d8a53f359e508c16e5a2460b3eb866c5a5743cfa6b0ae49868 -size 1255511 +oid sha256:862b16f9fbf6e74daac6dcc85b286bf1e67cb2516379d72a4284215c8b818852 +size 620198 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png index b0f28ce1e49d9108677db73e245cd47309562c1a..7c1af865c3d89ccfaa2762f33a06acb0cb52b629 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c131c9256b0518dfa13ab47ddee43803066f77359888518721a18caf152ce9e6 -size 810654 +oid sha256:7fa828a0e7aa98bde264718991082f649af9f36a82ca3c9f19ffe85ab6869dcc +size 1190216 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png index 3766d77e3c3a931df21ed786edf37feb84c814b0..c22172665966f8aae2a0a198710e094d16cbcfeb 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7da09a4f30e5702b0e1c3b610f0063a75550428c8b4371f8262416334be541b5 -size 980263 +oid sha256:3475a8815c8d66544f15a3ebeb0a7e6779c0cc30f8b10db1f9405181fe09e9e2 +size 782332 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png index f7d414413f6124f93f5c0630473fc906aebb0d68..f0e89d25c8c21bd923afe15637fc3985e63c2c35 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:271c5016d2ecb38cd4f2e6a2bb4765ebc9e28d52e93f2c241093d08628929f50 -size 892036 +oid sha256:b0de3d662f3b22d23b379abafa3062534910b161ac314ab8314e4efd4c9671d3 +size 980060 diff --git 
a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png index be14c5a729466ebd852d37f4ce0830e7f40f7d6b..fdb51f801341f7056211441587dcd8e31bdfcf9f 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:297cfcb49debf30e839f27b19d98b0b4f80b10f2fd068ab6d530aa271bbe0e49 -size 827855 +oid sha256:44d3761481a1d310eb0adbb9157281165535b8d13b64605eac47d86ae416eb65 +size 248317 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png index 2163a212312da2a2dfb9c17717e1c22c868ae5a4..9708db51b989b3da248e5caccc4c5bec465c57a2 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d356c95a8df36f0a9fad31a863dd8f844d67fc3798eec8bf84e425e6147e5b2c -size 758756 +oid sha256:64664406f13d3a154b7570ab7472e37672fa55b560aa3dc086979605d15ea1dc +size 1159187 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png index 360b0feabec525de71981a30ad57d4021fe521a9..0b8b96b7c031d0678c6a410ef0da8057010b2146 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:139c5a8b556c2a3e51ad4b8757a264ba29cfd8918c617e3ef4bd180921eb8954 -size 816630 +oid sha256:02607d1d4726e6dcf0c9a8ff1eafb2158423a92900b82c64e8ca11d4bb5d07d0 +size 1178243 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png index 82399728b1926327882ffdbf8c0f368765d0faeb..ed884bb8c7c7a753a52bd656a556164e3eb388c8 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a5e61aaeace5cd2f57437f0e53f3f91f8cae09ed6bb26d77964cc7fcb28298a -size 2528171 +oid sha256:ae87652912f47585a13976d4501281def77376bc0a3f576a02b9ba4e14a14f3f +size 1628046 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png index c6e372a59e1655c3b037886e1e9f6471a7df04d8..e81db1e1bed01e36f5090c48a3d73cd5f83ca5dd 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a01c9e989e63033a43cf49bfa0d652922c5d3759fcee88d2b2253f750464e8a2 -size 1796267 +oid sha256:14d17414ca943ca42735089aa4f12e66b4039c61de4e54590f6fb4d20aaf43c9 +size 1026544 diff --git 
a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png index 94bac31658c679faa55ffa231e0e262c2cb710b2..f7b28db9ae8e36f2586becc0412c2fae89f0c060 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ef6e107c88b5b912d86c22bb194442074867e1ae367f8605384a2d25672e605 -size 872849 +oid sha256:a07594624d3d020792ab3df4ddaa73cc38a282b8238506e680219c852e64ee2b +size 1047046 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png index ea692801e02a0de33f4bf6a7a5e18d8d13c0fbfa..79c1d3e99b347140041c0aa931c925470da21576 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:796727bf2604ee9540310e08b4f2d467cce15cb9ffff06eb2f087d14596929e8 -size 847673 +oid sha256:91ba5aab64720fe41ce48aeff6bc89ef88945c242014b4327ee908f2245d47f3 +size 667392 diff --git a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png index 7c5ed214971f1f6aed4981615bba366c527b8113..3183a7bec3fb26a7b40615033fd9a38022584b0a 100644 --- a/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png +++ b/images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe125f2073cc251b340c825998fa1d47d64843bd1faccbdf77e77bbb9f49dc2d -size 826495 +oid sha256:0c8704b81ff23825a7c6c3bb2b27923065f6eef3dadabe9be6fe53f4059e0e8e +size 750264 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png b/images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png index bf4a25d512463f2b520c8fdb178dc2a2f76572d9..f545dbbf3bbe7ff6dfed9b1b494b15cb4de65415 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef805e18275a4aa54f4dc2c56d46299ef4eb26606dd0fe77e21917f1b7f0f659 -size 701025 +oid sha256:083587df23d9eda57c4e95c3755142d6043b1a9b3482f8a46765fdf87294716a +size 695655 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png b/images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png index 935c3fba7d412b8c18c267c06908bc238cd527ee..e637fe18e14b6ed8a888361f5fabec71d4a50e61 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5f9ea26d622b6cfa276564f7e0b827957d51b2c862b9047fa6f220d819b7b9e -size 1149667 +oid sha256:689cfbaf7a40f9ea948c1902616969025278192cc4e55246231018ef903fc130 +size 752263 diff --git 
a/images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png b/images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png index db7a811eb61aacacc1d8a46cfcbeec19ab6bccd1..a310b979afd88d0d7b7900b64e287d744a531b85 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b987aa7f607d04138e16c6eb925fabb73b7a47e0373504db8b6476ad823ef9a -size 586156 +oid sha256:de9ec03582150c7eeb5341ac9088cee0d54917d0219028280f04e74beb0a1a8d +size 933039 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png b/images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png index 7e097d9b836532c5e20c111c3454e790f6a4a42f..8ca3996b0812f7ea4334d34321619839dee9219b 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5639edbcd6b51721b43b57c9a2d57b4fbf461f434fe7128bd000f9be0cdbe396 -size 1766684 +oid sha256:faeb926b26892f540ba426e67a7e311154be30497bfc296ff6604be9736fe3af +size 1524669 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png b/images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png index 1b4f8d47b91f414228aa706be195a59873d0afdd..91ebc84287c109ebba9ff682900d91484300aec3 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19ded1e1de7fbe6c0fa4e69acdae600f3e03427defdc8dab049a0161e94bafd9 -size 1025923 +oid sha256:8ee727801eaa4785176f5f1973ca350c0b6283492d015e23eeed053d34aa925a +size 719627 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png b/images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png index ddf0e01d1e53fa6063970956e66ac1b18f3a85c7..1887627de52e0839c57cd4170951086dcba88448 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60c463784f3df0fb01a7bfe5b79e36d5c3269f418dc936e6580930295cd4227f -size 510009 +oid sha256:92ab32a9cf3f26dfb892b25770867fb4d21c635ef32e55acf43b7a6f8cae59a6 +size 733013 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png b/images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png index 71ddd2c2e2e1b939987c5ac8f0235efdc3ba4661..c2c36965f69fad7b0602bdbf82357172025f1fc5 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75cc4b754b4af4fc91b4d9c594ecdf66ebbdfd745ea551da1a2b45934e802dd1 -size 1032451 +oid sha256:4832fac8a561f113fa38736db58a975a1666b90ea740418f08f6d459fb59edb4 +size 893682 diff --git 
a/images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png b/images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png index 588d47e1554eceb34554ae30382dc1961776327d..8c980a5b5caa000b1638ce6e618f35d1a7fcf128 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2eeddbc01bb5020007ffaa5de2e5856f7f584bf24b5f874a14813273741e09ac -size 848511 +oid sha256:8b333ddb5d7149ea3d69724f0aa10093ba5a3aa66757bbf2495a812caa554000 +size 838032 diff --git a/images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png b/images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png index 20119125c557220e17cd1ee3ccd7d7744121561e..8096887b812cf0a3669f792ed670594a1ec9c20b 100644 --- a/images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png +++ b/images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2332428c6a803df06ffcedbaf85c8bca2439f8df05955f5261916522cfc69dee -size 1816164 +oid sha256:41c8f18c647f481e61f80de1eb5672be4621594d7594bbf6a102232ef1c04d31 +size 1895201 diff --git a/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png b/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png index 17bb5db44b4b5f8e10538d5581c0908e1c75ba0a..86de7267143e56b8eb9082cced8537704310fb7e 100644 --- a/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png +++ b/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccb808c23aa5692655f17bb611974d9ced0f61932c6fd7a2fb2730dd091b4645 -size 1087830 +oid sha256:91a36a45b51bfb85d204acb979a50c0fd00a31de3d31f2c4af7728f8807a9cb6 +size 1684543 diff --git a/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png b/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png index bd7604511fd3fe114b62666a26f3a62d57f45864..fc61f068f97a35332145b8818f4ee22f3650ee90 100644 --- a/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png +++ b/images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b15995762ebaba3641a94429a778a350ab22b35c0f4f4dd6fa762ee0439d3fed -size 1095826 +oid sha256:b756b64d16f56abd7efa96284b34933d63a3855bec59c32cb8888daf192c8846 +size 1067466 diff --git a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png index 7231aab935ddd36710de7f1c035ee1322de0920b..01af997cb5a7404b38500340c86f39488a2b5cdd 100644 --- a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png +++ b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03d638c85ff973772310ee654e6af0bc36dfe6dbe7aad524a193e1bcbfa669a7 -size 1136816 +oid sha256:c0c9c71ca32ac113dd052bed880b965b68b84b9f75cd312f4181fba8f628a6ee +size 1238891 diff --git 
a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png index 8109a9ebf2d9bfdbb8280946239130b7860654d4..362d9cd6cd21ba2bde4c380294bf87c1744869b6 100644 --- a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png +++ b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1aa22f5f9928e6ff2c0b5f4335a6472b2347471cb04e55a89c3c1dc31bb879f -size 1089617 +oid sha256:41a712641f370900352fe5cd3953eeb41bb1bc8fcb69a2991837ecd15cdbaa70 +size 1238279 diff --git a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png index 362d9cd6cd21ba2bde4c380294bf87c1744869b6..fffb16712e46049422bf46d4ed75577ffbf93d5d 100644 --- a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png +++ b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41a712641f370900352fe5cd3953eeb41bb1bc8fcb69a2991837ecd15cdbaa70 -size 1238279 +oid sha256:a6084139bd0f1bb9edba731c8a935f26eef88d83c81e2a1fdabf407a3cd2a39b +size 1212448 diff --git a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png index 44574d1df1a2c0f22e31d9efb72c1150581103b0..2832b28b28ebb78e117d94f9e0bcbf723a73415a 100644 --- a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png +++ b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51abaebd644ae245ec8b962e8e1a57989aab89ae50c08733e9175d9cded01f5f -size 853542 +oid sha256:60b1ba5ff66826d43082621bbb3984826d1251577796ff6b2a2987f72a47f4f9 +size 865031 diff --git a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png index 335f388634cb66fe02c4f57359202bfe38368d7e..e2568ad7ef096817c8b0c4ea1fcd0766be185e94 100644 --- a/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png +++ b/images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93d1eec938d41c3dfcb7b2203b956c332f8889b736485422af1cf9093a496f52 -size 962761 +oid sha256:33c0ca6ce2bac126d50907656b98735c30943c523dbc28a120821fb157ef36e1 +size 1029592 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png index dbf7a09c789f33ef8879efcb6548e18ed83557df..ca5974247a8b006b55e710c991bcf87d24052e3d 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ea736de935a4ea44463ae9cfc532447b877b912070d7b3b62190b5c8ef0339d -size 746477 +oid sha256:18dbca9b0e2de360d70bbdb981934305e7c161ba98f52a7d9928352ec3fe8e3e +size 563699 diff --git 
a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png index fd9293cc121317d281e9e7f24e2e9a4ef0f4d0cb..b3625a9c547be721874e5d2fad1b9060054f1894 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a8920a0a70f2cc97ac6e769d4bf9d90f0c5797d952da7423f55567a0d988a34 -size 612271 +oid sha256:d28a908c4d7756b63ad9d8f1eb7e840dfde7a41e6a13bb6e167cadeeb0753900 +size 413398 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png index 5a2814ba56de1723d89e27c4265e7d031f6c3f83..55c53b7aa59be793c357070698959925279c4585 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d30bc19cbec196b1f8b1de7225e15c32a38abec0e754dcee299d015c7ea129e -size 742671 +oid sha256:ee5dfbabefa9a676daa38df16720530cb9ccad1610e3cfda152b4055b4101d5d +size 547351 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png index 15ec7925eadfd47cc8a99a8a77eaa2aca95d691b..214922e9143eb14c8ffa490dbe22fe990be7e4de 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe5cddcb5a91ae0dc383dd0908caa450fc3b304eef9d0cadf26da2ec4b0f7709 -size 611533 +oid sha256:cfa629bd4143f849e7b11fedf0dee6c0bf16966bcf786a0a95a83c1982e71dfa +size 495436 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png index cd85d18b1081322863506692096ebb2f10e409e0..825f8bfccfbd791cd56df118ab70c6b46a954a47 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe25887b746f3aecad8c0e8e1150bf3b3a05f7221894b94f99f934d6596de43c -size 1233002 +oid sha256:4695ccda37656781bf467996b7df58f3126905c2e0ec3b51fec6d94be8e98599 +size 1351069 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png index 79dad185737f7a17774a7dd2bfc1dc116dabdb8f..b24ba419530674a203da231ec8dded3749c69b5e 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72c32df7c994fd18a8efb9da5e25f2739d8ba1051034d0b114590008ef75fa2c -size 730210 +oid sha256:83626adb4961cfae8dd185c740c9a9a645c558c1d56f8817c03c64ca80a4c605 +size 854739 diff --git 
a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png index 80ff0bffc9eced40cebb89606f930186dc503b64..32094de1609de7522aa2b910138fa4d8e266d8f7 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3b7924dfe7febe2a4790e6b8a757542aea0e52d7d0994c314680c98873bbbd7 -size 621389 +oid sha256:eff93e9d1871fd7cd318f385f15088925b1cacdff128f91d27680b5fcad7cc68 +size 388198 diff --git a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png index 8c7b1d05f420b11a233ab8f5f5098d8a7882a828..65d199ee859b21c3d9cb01c8b287cd99969d11cf 100644 --- a/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png +++ b/images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f112258660b880297e41227fa8ad0730a51f1c6e76591f3b972773ac883ff3d2 -size 740622 +oid sha256:5d5115650a29a5556c343fcaa2ee5930541dcf06f1b39a577660d261d3cdac29 +size 290802 diff --git a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png index 2a1f320905a7bd4afe74c4c4a16f0f1289e350f2..186b453cd41f6601b813d2af762d48da55c1ab38 100644 --- a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png +++ b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed31c588edef6a2da68d773f98615197ba0befc1c26b5d08170325f7ccb0d163 -size 516281 +oid sha256:1c1f1f2fe808a25b62486b99d68d9147441f921375a241b147a84b66d3314e94 +size 576028 diff --git a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png index 3edc1edf723be5a31c9364c9fa4bf08fd6ffd427..a73be31efc8d95c0273eefaf6e33a34ac52e789e 100644 --- a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png +++ b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db667f3a9001e4d119dc88a5893e7aed6c84e9d7b29339438c2930a1212bbec1 -size 1229203 +oid sha256:c902783d9069944fb36128effa9ce3e4e56e12254652252440faf4cf1e8c5545 +size 1294072 diff --git a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png index 09e9aff5f593ead2e0233b0754dd840d4b24e9d7..1a372da0597d3108d80c048c623cf1ecaf4d1eed 100644 --- a/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png +++ b/images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:44150191c4cc17a9712728f96d0ca95a7f27492622c29e0a57b114e1bc8cab4c -size 509168 +oid sha256:58267fbd66194a354b0613e191412e5dcca4b02b8c8ee6296b9316316fca921f +size 499734 diff --git 
a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png index a58f73804a3264f6064dd597876e706d9fac78c6..40f79978cdfecf726179e32eeb1ed0a0e461b58e 100644 --- a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png +++ b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c33c26d9404a284b5853448d10447d4a03299410d33854f8f06017a62130d984 -size 1633810 +oid sha256:ce85e477f40329dfc7fa61296f3cab444fd21fdb32f877148470805396cfa843 +size 1492221 diff --git a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png index a4aa334d9207a24319ab2e4bd20bc520411f6e99..e83ce9d45e74a38460cafe1bb478d8b260081883 100644 --- a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png +++ b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9688bf1b33da0b8ae49bb3044f8660cba8190e49d2cf6b1725837aa96574aca5 -size 888359 +oid sha256:a32caee8ab7080f3bf00a6446533fdd1e73ee0a47e92187cec6216a6767ded6a +size 1692132 diff --git a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png index a968a081042f0478dc3d4ce9c7f45b52ff1b26e4..143f4a6d8f871af67b9711e74ebe0b94ade04403 100644 --- a/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png +++ b/images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fead7c495560ce972a5435a0bf574cbbf2f078a5aef96964f2eee3bb42943f4 -size 1345606 +oid sha256:2c9d1cfe373354dad0beb158b513a52aee64cb27c547e747fd7821501afa402e +size 868461 diff --git a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png index 2fc616aa3105b36594e011599336e46a2efe8649..e8c8e2e6b7e751d163be66f94492d15bef52f634 100644 --- a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png +++ b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2366f877a31d020a45f9639fcac5d7d86a3594d3cd94932a2eec981b12c49a14 -size 1113755 +oid sha256:096c927568cfe589fac6e86d82378e9f8ba0fe08a7574f952bc84b6e7cbd1e2f +size 1520976 diff --git a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png index d82c923a64c771a5ea1bde3be5ed777d265d4e30..fe01b6727c63f50b3f8e404be74b36df4bce96b0 100644 --- a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png +++ b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd41fe206b885b8713dae4d378dab8c8097ed1d1093a43d41a9081fe433dbf77 -size 987859 +oid sha256:fba73f3247e5ce7482886806f66ed5d6f6d91fd00e7d4b11e86bea422dd412ab +size 366878 diff --git 
a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png index 4cf12d12322796045d8cd5102d93f429d27ab3a6..6a5dda63e61e3dabeb68ca37eda6ae2e8881d4ab 100644 --- a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png +++ b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:728dc6974150711872d6de8753ddbc3104056fee565c736a959f2339800ab5d3 -size 783914 +oid sha256:e0b832738dbf3b827f42365a1a2cb8f58497a42b822a7c230b382e4ef75f35e1 +size 803377 diff --git a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png index 8ee38a3b969ad9293a0245579b523ec83497af4d..2602cfce5092a3a784b745b125c9f794187c6654 100644 --- a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png +++ b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2085360e88a0b2ecbf1cc26b7edea2ce7d661c8e32317c85c3b8173bf68e9ad -size 1070163 +oid sha256:2c66cc565769bc7ab7efe53088cc4f8987d5c022d1c81557b12ca6bfbd13d923 +size 1347620 diff --git a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png index 2fc616aa3105b36594e011599336e46a2efe8649..067b5c45e9570d00ba1c695c3b336bf7e1e6452e 100644 --- a/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png +++ b/images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2366f877a31d020a45f9639fcac5d7d86a3594d3cd94932a2eec981b12c49a14 -size 1113755 +oid sha256:fbd0c1efa89a3580d850296b7e40b7571a22ed49b9ebcef8a7373611a6c9be20 +size 505851 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png index e5892240cfdb7a1c71e31c86290a9d42a9bc8309..97a277ee2da021e034b8e24918973f63664e318d 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98f8bbdd904755a645229306ecef4b3661b1ca2751464a25b42cef0f2eb9a84e -size 4570221 +oid sha256:cca33ef226d05394fd549018936eff4f6edad513ae5b7fce6f6f2f79696d70d8 +size 656834 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png index 4dff164257a02bf7ff394062a5b1a4fa89caa871..edc6892ce3f1113632138f0bbc01e38c95357052 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86d638d04ee15d903505c83ce016e832c03cbbbd53ebe85a308130448af9dbb2 -size 958546 +oid sha256:c7fa58f942ae2422bdbb90f8407b252ab459234db5080a9efe768c4ceb845e4e +size 617654 diff --git 
a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png index b24138e72a726537c3f5af122e4542a123f7c940..cc59ac2d6f74f69b7c7e5613647af0c127aa1854 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a6ece7b7ea3a70ed727e77f059c0222cbaa8babc27b85c10d32c1a1950db57f -size 874412 +oid sha256:8ce16c244e17e0a2d597ba86e50b644ff372fa578e633361cd6ede251b0354d5 +size 903355 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png index 1ba225e57b7d85104333430e8c5b15ccaa19192f..1258c26b83536750c98b1d38ec7fc2a2bca46acb 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9bf380d968cd59e6bf47e42ab88d5f6906a76ac53ea9d37b3ed805fee8ec2f07 -size 856898 +oid sha256:c3722a3569fd7d13a582110cf942975e198a63e62246f8aeccb0d45f04099c9b +size 803404 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png index adc100e43ccdf37a1423fd40f75204ac53f5c0e0..9a3dac30d786fd0178ad6dc8793732b9e6fb12ed 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d95a205db9bc1f9bd373d9e2416e6070e8fa9c0bcf6e9921c679841bfe05e70c -size 872343 +oid sha256:baa4bcbbf64235ee18a8bffd145a55cd80c9a83d62a4dba1a517fefc1a94426e +size 901268 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png index c27a005043575d30b51c4ad96c927bb5502aeb58..f995a4106b04d1603fb61562f17c479d2ed70166 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b743b83bb2ed2448ea410a74fad01b9e4ce4776a137a56fbfb2669934db2fade -size 863137 +oid sha256:3494ce72ed4fab897d31eb8691d3aa4eabf8b4b1af74148f75548138de42da69 +size 443477 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png index 19a071235c6c7a862cc8f3cee2a158c954bd3fe9..2b017488a5d4b9ac4101b98889237ce6e7b0612f 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5c71a44014548aa0f2251a063711ebbf1eef25c89430c980fcd7915be78512f -size 856035 +oid sha256:13cf0cd674779b59a35fbaea35672cc8a18c9379057f763cc401d214e6fba378 +size 848488 diff --git 
a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png index 084dc4ddd858039be82a881f0826a3d7d2604768..79283864b3deb91ab1628450b8b8120ac1cf6ad0 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33ef89eb4c68e1100ec2789bba38ae8453e38660e110eedf294271edc18cc743 -size 6761737 +oid sha256:63730ad6704dfc96e5de8bd2197aa79808c2be3d3296fa4a22507d7a2b2eccf7 +size 275915 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png index cbf91a0739aded4136d065344d0e2905cc75e94f..f51a697a8d7baec27838eafc6095152577f92659 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:691172f2150ef980a00662e4310539aa1619551ca907fb6d712f11209b748376 -size 864232 +oid sha256:ee17d6000dd53440b1d9c4905fbb4b836a2f019540f34114e47ec47af13ce28f +size 904151 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png index c31ea6aaec76b1f3d1836e2de9a8d779c0f5fda2..af4cf30cc72e1ea07fd3ec54b3dfdf42c087ebd2 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77e21981596cbe2a6c8dd6be1d1a44112810b2d22afe8004eae3781c88d6216c -size 916876 +oid sha256:3a937b7dceb9a4dc34e36cf33d909cb08782c0db1b4b38c28aa7aac62ac0a51b +size 478532 diff --git a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png index 48ebaf48a2e1f7335b9c6a51108dca2397dc5453..72c5b811269906c223567a7b9f1ecada847ba1cf 100644 --- a/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png +++ b/images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e51e48a4a69f3e4c06d4f4ff5619fff1c262e41fb4d3c7b7bf9ea5045e3e4d65 -size 878160 +oid sha256:de55134c59c43dc2a95acb62fc7a3d8b876ac144c8583bd3f1ccda6cc24f95dc +size 826238 diff --git a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png index fd1eb56af7372dbe238234e4667679d61004f4d7..73c275985104b4e6f92145d41e8377a0af31072a 100644 --- a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png +++ b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7e49bea1d7a4662a245b6ba01da513446552539ef79228daa36eb066560932a -size 907542 +oid sha256:159c66c1f3bf67aa92f0af3f4198b96f7ff6d8ac563ec234c936c339c852d365 +size 953306 diff --git 
a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png index 2611265430e65cf1063fcc6b126493a96060e628..fded66d839b52fbbed6428c48b39f2763856565a 100644 --- a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png +++ b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e6ce8ffe267cb71505570961c785401e57d0cac01e9fcf603d123bb3e1e82fc -size 884557 +oid sha256:ff57eefdf25ac3995984bd43426df67555d27872758ab36cf416501c8c7c3afa +size 879628 diff --git a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png index e5520a9dd79367a09deae7c0d0abf1c45f89ce5d..783de6aad2f8cebf235c7f86f756223431453234 100644 --- a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png +++ b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c5e8d17c81dbf22993f40f2dfca15483d7ecb3033529ca6b4853ac66c8170c6 -size 1172700 +oid sha256:eeb7601a8e04eb4f156c58b103eaeaf419c893c8a5ba70a92508cbe2b83c02d4 +size 1620288 diff --git a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png index cffc6659fee3522824a971d28e85f0b385e75149..daec11f14bdde8fcc9f9d3c87ac3c4b5e60ca47e 100644 --- a/images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png +++ b/images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e909b14c90ecbb0d1436d7ab5da22eeb7079b993efe360c49c1404c68068d60 -size 2717849 +oid sha256:bc7581b420eecfa9dcfe9856b696f67ab6ddbc16df0eb108288913b06464cede +size 1407775 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png index 5101fc0b01752b31140322456cfab783b97fd341..ec64740ec4d7f7862af79b6e1cd161f3ae2ae925 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9486326d19e44ad450f8dd912c2d19173a9286d960c01ea6ac31f21f3019d26f -size 549359 +oid sha256:c554f7db4b157eedd1ae79e8b3227a415b406b159240ee4c33ae64f06478ba36 +size 725662 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png index 8232e2a838de7c5594747bed96946b1496d37080..83960048fed0b8ae679e3c2dbcc4652c8b92c00a 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99bf30e56fb3102c3ba5e7a0bd34a90ec25b3df1035350f15a14e994cfbdca9e -size 304642 +oid sha256:f6af8c45faa5ab83c1c611cbfe66164d8bf275e28bc5594bdc87a504d42b9e32 +size 611354 diff --git 
a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png index f9e2a473ff63b44bcad2753b2dfec89b2faf6493..17d8a752e570e68f7be626b4f4e5befdd4a1708c 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6971295eecd81342f6b717ae3e53e91d900852e90d252480e6d64eaf222b1bc3 -size 495879 +oid sha256:ee0c1ae9b5ca134bba739128bd8d5e0146efde0605462a69b0f25ed0675b8af0 +size 983690 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png index 97bbf282fcb3e695ff220ce8463742e3396d9b3d..95dd1891e9ec6154edb4287cd4e26c3102c5bb76 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85a96861aa351255457f6699a5d016343b0543008177ad9c7799d8728ed801ab -size 370426 +oid sha256:abae05b2022852642fa4bcceef01b4c55f111372cc330052a61747ca4bd48aca +size 559117 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png index db6332d3b97aa8c7adc765c2777b7a573df40a22..c515c0775e3c1376efb261a87edd1fd317dcc6dd 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a15383922d5902c77245791f8cfa694583032be03e301f3cb1e6f1fd9d383d90 -size 359847 +oid sha256:5c05afd7eb95dcb463df20faa3acd1849889de0c0b8f9d9d0ab65bcb4d981dbb +size 772116 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png index 144a2e9f1f3c46676b135519922680e21bc6a12e..048759aae16a9b09dd3fd3b9b78511a988dbd8fb 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d941b04f8232f535fdf8c63a49cc663aa866f91d6cda74f959867f1cb16a6de0 -size 545876 +oid sha256:cbea9f450b40c183e2f7424afb3e08e1235d611f61e760738b33174f155fcd05 +size 302197 diff --git a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png index dc773de25edd5f8d906a6dea6c0838bc2b8b7cc8..b75f671a6adbee6666c2dc55ae7e6167d6a184bc 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d67ecd1c248f4623383299b749508e523c5e039720e80e27ee159c1d1c953ccf -size 491129 +oid sha256:889f949754b0a9b1596eb09817d0d1398071336781b08e06c01f16ca7c883cff +size 521682 diff --git 
a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png index b7107097cd78606e10a05ff9a0b69dae93525aa8..79e4cc1fd2e68e95d4e3c6d03f9dafe3b5e697b2 100644 --- a/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png +++ b/images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e24da1802a562878e0ca328564d7e55945ff5d368fc07d09bae41ef27940b838 -size 449371 +oid sha256:4dcd1b84c32ac2222edd8c6381fbecb6efe89f9a8aed8ee7cf0321f02cf1aed9 +size 672249 diff --git a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png index 8bf3393ea979a952df92d9a03177fef28432e7b6..1983729ac8dbcf51611f1afefebde66282c928b7 100644 --- a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png +++ b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e56be336598663b336a0bbb13269884290d47f5f978162f48aa36026ae3c6c22 -size 1167253 +oid sha256:2f0dee2daa24118910223dddac726acbc314032dda00d934688fb64d9b1d86bd +size 1085332 diff --git a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png index afc46750a27aeb0b8bc09d268078b972e4567d21..4beafbdc11e99b5aef43f725af19326f58ba70f3 100644 --- a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png +++ b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7be7905e0e99265807a388a15bf26ac4175696ba2513916e22629e0da815a6be -size 765665 +oid sha256:80b8cf75e5e1f85628e75cd5c876ea1c544a6579713750e33435cc45ddc9ba91 +size 873294 diff --git a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png index e0fd074ef93881c0f245c33dfb15a41300dda00e..6a1199742164eafaf98a7df07f1aec9db172fc1a 100644 --- a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png +++ b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d0444326e878ea5da605f749b9af353911ca8c55eeeee70f59d29c2b7ad8689 -size 466854 +oid sha256:89f121d5dcf60d792a3e42f8c06d1631a9c8a6e1bd3ef52e0523caa24f7d0394 +size 467389 diff --git a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png index f1b4a3f3b31f70b2eeca7d97895212efa2df1ce5..d104679a482c3deae9f3f96d544f9eaa45801ac3 100644 --- a/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png +++ b/images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5df54f3c2a2baffc8eb1410588697d6ae5ea0abe6ff1a13c3b6a8c6af413288 -size 769638 +oid sha256:8078766d823e23c17291684bf0bb6489ef76500d6e1e0436d8a06920016268a8 +size 1251160 diff --git 
a/images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png b/images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png index ff5df2f8e5b2279039d0eaa4f2bbdc9338912059..f8436643c7f347ed7f60d7c514b297ea31e7e9f8 100644 --- a/images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png +++ b/images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e53ce434c65711e547415a2d7c508e14ad7ab4a6a06f99f4c28b5ce944153cb -size 1052157 +oid sha256:d172b565e20877ca4d61f40b369c04e919d896937e7e946e526132258342652e +size 1434292 diff --git a/images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png b/images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png index 2253fd01704ae6e11fcf0f52115b3186643d173f..c82bb9dbe341609fa5aca1bac2e68012b9b0b5fa 100644 --- a/images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png +++ b/images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6a5205342475a0188ff90f429f3960c5c7f0da363c230b7544a272f332ff15a -size 1317549 +oid sha256:8d44c084ed4a4cd41091cc39b5bd13f927d108c54ccdcc1007d77079070a8306 +size 1664923 diff --git a/images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png b/images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png index 116b1450bb315d740e877b651b8e5157d5fd2b43..ce72180f2a370b02b2fe9625192615b79331fbde 100644 --- a/images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png +++ b/images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b62a6583d857946f2410971fd846bd43aa8fe2eaffa2f4f39a28a7e4d8a9c61 -size 1079990 +oid sha256:06ecc7554b5200458127eade7d0f7688c387312bfcb424408733f9639a7b26fc +size 1224549 diff --git a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png index f7ede04e2d80785ad2d823ded473bc0a139ccaf7..5267aedbbc5bb5f198b5d7c50a78ca9848ab5a54 100644 --- a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png +++ b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7470a96165983d177cb940ff14b2437f856650a8f058185955ab4ca4cd1eb9be -size 1582885 +oid sha256:011a88d6211e88566a2a8df5392cd49cf78172c6f3f7c07656212ff4bb467802 +size 2019289 diff --git a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png index b385c09ee2ab04c99b7c6a137ec72d38ffae86fc..e196974de5434e958826fc0f679c7d20bf73900c 100644 --- a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png +++ b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6fce132cd958058215ba4dfe235c6eaf5a79f0250a22a0a5a6de9def03a07e5 -size 2311679 +oid sha256:787a1bf19b7f3f9c02fd3d08eab5c9505cff6f4ac4928ac08d1fcec193bd0ab3 +size 2221728 diff --git 
a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png index b3d0eaa8150fbbccd17c1c787b3b9a85633646e2..46c9329b9cd5b7d8dd5ad680ebef737548f15945 100644 --- a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png +++ b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed8a16a2b0b51a0a30f88e0fb32efab9008c036dd6ace4f00d35ff6251893fe8 -size 2658593 +oid sha256:dc66231d3b29dca00e04d663574bc626ae04ef300deae00845e748bef2c36f54 +size 2079261 diff --git a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png index 04b7ce4444c8d78119c8c1e5201881ef1f663cea..77cc99730c0c9fa5d763a3ea27e81aa47b3ec73f 100644 --- a/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png +++ b/images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23b00d1c7bcf5cbc0b06863015cbe311183d2f0638e1d8dae0cd433af7f3b575 -size 1663294 +oid sha256:7c30469a24974ee8301eea195e3dfc7abb0a68c67a4014181270e336e30dcc2e +size 2112069 diff --git a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png index 3da8e06f6a1d3d204d1e70e63e272f5597872558..dc9f709b03650110b7ad66108c7462df8690aef2 100644 --- a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png +++ b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52ae9baf0d5e5d7468305048563599ed92dbb1384eec1fbdf2c5bfa40bc31e33 -size 787648 +oid sha256:a81632177bcc432e71e09b1b483aa8c63d0c1d45896fa7f48ec5576ede63f8aa +size 1281998 diff --git a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png index ff21b3c5207ddac9dd9c2b758a51d509b3f70db4..b0686941a8ec76f2d6413690ed5c6d933e19fdf4 100644 --- a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png +++ b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe745cacd6b312c22e9f7fa98dde9094dfd55f3eafd56e8e6f3c447263ed57c1 -size 1230411 +oid sha256:54dfa1d743629f97720efda70f7bbef3d82dd3373445c7f928867121db7438e9 +size 1425431 diff --git a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png index 220635ad21e02024b23ea886ffcd125bb7237246..e6bce868591bd4afed0e70ddf9e4473d3df5130f 100644 --- a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png +++ b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e9b3f9886127f3e37065334b91ca72fcd453798deedb8701b8320d688fdfd06 -size 1236375 +oid sha256:7a78ae3118a7076af137fd6e1573865f763776527de573a4412c7c3617a57bd3 +size 1468296 diff --git 
a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png index 0a59c708908a78b50f4acacb7d60ff055f5dd7e6..3610a6eda70a0263954b48c56b56340e5c388f0c 100644 --- a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png +++ b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:465df118c9c4c0639878b7f71328ad283e99545468e7f4a0ec9588a5b2e6ec7c -size 1236138 +oid sha256:7ddcf10a35159df4b5c2b51887837a272eb41d5a8a41c8220b3d843ee30e1af5 +size 1216150 diff --git a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png index 86896b318990acc2b270404904d9c43810e49db8..90e4d6e60fff035d66692eaf2ed36c13440c9496 100644 --- a/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png +++ b/images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:979ec10f043eb5a8f8bb2ee70d29695d3172417d6261a979437947f5a1b86ae7 -size 1245505 +oid sha256:05fa78f9410f7af5cfa3b1de1d7360c5b13ae685cc1ea1e5eb664f0967c0240f +size 1448834 diff --git a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png index d3c31421ef41778817028aa8b0b5f509ec6d1e33..f84408a3c333827d33d4479d42e7d5419f75a8b2 100644 --- a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png +++ b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6258a4440196204389ab6dc1054297e170a56802297b70168ac80dea94b1c5a4 -size 1181727 +oid sha256:4a46404804dfa0f3061bbf230f22f1fa20de505d56a95f27d347f4133238cf13 +size 1557325 diff --git a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png index 8add0d63dbb49b90d416651841f5c49b7c7ed49a..9401f00a35ebbd309fc3d76a686f8b3972cd7902 100644 --- a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png +++ b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0f2ed6e014a53eea4aadb55ab38814b75272af06b87cb0e6ec83860288380eb -size 1082804 +oid sha256:be6837feeb856f3aa2bdc47d98534649f930c031b605b21b9dee7aaee28877a8 +size 1665385 diff --git a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png index 47a68d3f09d0f9ada800ce883c62860769a2b469..4e8512fb3a4d7969aa7b54deea59e93087d2889f 100644 --- a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png +++ b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81c9eaba4117addea6815e0a01ba5972d9d7db53bceb9de86663a88a57926602 -size 1035084 +oid sha256:ece0b787c1d8fa8adca56cf49f904dd66e77ecedcb376b8c2699051132f8d311 +size 1555773 diff --git 
a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png index 29d2573b30d04b611715d3983ff20834cf3435b7..c3aeacde1e95fd8677609ff5790f27bcbf2f3e27 100644 --- a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png +++ b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be8eabaf50aa0e392c8cb0c6c90f602d33911e4bb2112e62f54f7e9e94603b69 -size 1487397 +oid sha256:cf850c5b854049c352b1219e15fe7771f33356c808f4e174d09aca6469af3efc +size 1755826 diff --git a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png index b347750d9622ec13a860fbc3758f39fb5b142d87..e87030d5c3839e1f704ed0916f1accdf7e4c57d7 100644 --- a/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png +++ b/images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff7b07a5eb42dce78d5f832a0948c6a0d6197bd6dc4648bb107d46fec4cb529b -size 1291871 +oid sha256:05d73915a755e8f4215a201058407c8daa4acbac8845aeaf722682b7e6a347d4 +size 1839810 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png index 2e7f07aba665799a937ece81e8c685fb30b421bc..0d98e977a5fdab9e5f805719b2e724abfe17f607 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:478b6330f1c6486d380444c9957b0efe1eb888515a226bd241665361280c8196 -size 391911 +oid sha256:cef717865b8c4cbf25acae56f5bddb10b87421fe511b8d6c84735e9865c4380d +size 374621 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png index 92fafd5d00aa0d6a62c02269e6fe1edd6dce84fc..afb1f9a0a46973bd63c352f36fb082b6cf76bd9e 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d39846d2966422aa212238b44c38b59a44055b7e117565d65023579bcd69267 -size 1122747 +oid sha256:52641e7a09e13e7f95d6217eacb5a4fb1e082bb9fce900fb22f83bd5ebd4cea4 +size 1372472 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png index 526f4b81151a2c0e0a9457cf2bf45ba87480475d..2af31e0b6dea9d0ba715a27141143b91a7a0a409 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0151c477ea85158e15e87aa7350d43a3f6b088358cc04f5ce3e519e9e063afd -size 887279 +oid sha256:34a256675d0c178d6440640fc10c0fd353d3a65e33c3a82f9094c8eb74562e93 +size 989733 diff --git 
a/images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png index 4bf5406f1c839b4c3008f92cac19127d35f5e324..a9c0b4216bc4f6ec10928b204aba10aa69d0bf99 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9db5bc369a1c459b893eb6e0365c25ba0a848c88c275597775dd32a5b4972172 -size 1096989 +oid sha256:8f7543028626f420c4464096a092f32321f0f96620145e4d6bd40503ffcb0a30 +size 1041781 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png index 808c1fb9aefee66666ebd90abbc67bdd20ec7e4b..5c7977f86049bcbb285cbd13bf7fd021eff3adbb 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42d907cda8fda6d72325aa2c5332624972e4f684a7e116c51f8b5ab522055cb4 -size 1123583 +oid sha256:4e7f05ea8f4c63fede24b19cc5f250843436e1bb66fdb61d220553a267ff53d6 +size 1123841 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png index 2e7f07aba665799a937ece81e8c685fb30b421bc..29431226ef71dc5148b7e5b53c723eed1f1242f5 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:478b6330f1c6486d380444c9957b0efe1eb888515a226bd241665361280c8196 -size 391911 +oid sha256:36d023e825b1604d4724f0ce7ec0b5deb52bc10691c20269e959d72ffeda8165 +size 403419 diff --git a/images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png b/images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png index 169e643ffe36da2749ba87a95e9c6c9623ece9c5..ffdffeff07ca24a4c6d62f76be08869f4b18389b 100644 --- a/images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png +++ b/images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15220a58befce259d0ddc24e129b12af70ddd4e0d94f9579811c555fa3802cb4 -size 1120244 +oid sha256:877492ecb7be102b83faa1df1a41b9aed668ab9028c984d1df497e63068dc607 +size 984351 diff --git a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png index 318f052f59f3eee8dd2e9cee9a588fe3cdc7b251..d19f623cc826be84a0e4f661e087fe4ac35bb95e 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3414e2e4dcee5fc23c9578850b36f1cc2ff6708155e63d05fe6dafa7b6122ffb -size 1124552 +oid sha256:74167f2329f6ecdcc618098927f832c599df12577452f107a0e70cc0174a0c77 +size 1136365 diff --git 
a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png index 6b13307fe38adcca3d97353411fdb0da19406918..10f37518f5056d452a22099d6dfe5dfc1ff59754 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81257ea5b58207e81e6d3b4e808eabe57552107c853c94c307ee3a315c446057 -size 2845696 +oid sha256:deeabb19a212d11a88b65c52cc53f71f38be145942fa08fbef794d45573accf2 +size 1784639 diff --git a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png index 76da5cb2a221c13f152388719a72efb143a9a946..5ddbcaf22668661f53cb179c6dcd211233cd7525 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce953513cc6cfc2d04f025b6fede84a5afb466d155757e6d6d9e3f6f74947c3c -size 1396242 +oid sha256:14c7b6c10bcbbd7abf9df235a0ac85d1598d4d5159023b7f62beecbeb495c9ec +size 1779292 diff --git a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png index ee3f0b6aa71e1aeca3f014d75f9c6e30ebf8c9bd..b0c9f2c154e83efefdfbbb12c2613b3a8246faf6 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1fc0b68dc69db6d9da49e9b961870464053bf5ad45abe521e6760328ab96dc8 -size 1338788 +oid sha256:42ff160239fa125a03c87656c81566593476eb420a149be8f8a4113e46d31e01 +size 1461591 diff --git a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png index 4bdd20cee02cc715cf10ba205923def054b25030..26a612456cc9e11001511d60412362f8aa013f64 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:274d3fd7c0ed747aa712f318f8d3e50341e971631e6051847077c0edc4259c22 -size 1532516 +oid sha256:e54b6d2331c2320f6a2c2d2dd4d7ed04eb4e925ed9d85e31d883dab508d08485 +size 1379760 diff --git a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png index 163141e35c484b5b8edb0bfc64ae805e0a33207d..4176ef23d99f5792c3fb8f15a953235eeb07b304 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0629d818cc6a987606bd12f8546892908d5d758c49116cb6610383a243240ad9 -size 1237901 +oid sha256:a7a1451911aed069c6eeab535f2035495868e11211b15ee618d8b6214d1c3270 +size 1138060 diff --git 
a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png index f889ae49bee702e7027c0d19111d9733c95954b8..b537e2b7f3676564e8a6c48487d8cac397544d1e 100644 --- a/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png +++ b/images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b98e4ec22abc24e26ea79de46498029446dd72383775d21d5cc538f15fa840ec -size 1536622 +oid sha256:8d2929adb716167a077ed65cf03ad6af28b73f97f095d4e5ab0e8a4b3f9dee34 +size 1536338 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png index 36cb17f9e8da8451b0218973dec02a94161d5228..5eec29e8db3a0b22642df06761e08d7053eaf2b6 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c82bfa932d7147ee45ae12ac13ed42aa59751c002f6d7ce072829a64322155f -size 799841 +oid sha256:f6cebc798fafd5311f8569b1802c9e9890276011f7d4ac5192a8f96b5599e962 +size 712828 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png index 72338d3fc4ff3b3062d9d1ea4927f8a4a9057734..06d5b7c840acf8bb356cdc39ab041a89faa6155c 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9a0b23979b10a20a0069e2de78f955eb106e6621ad45f754ad1a5b45e58bf33 -size 1713812 +oid sha256:b3b9b74338a1bbcd9ebaaa6e48ac6eaa99ad94af447e5e793aa7b3a4ad897b8f +size 1016624 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png index 1d9066179435c49bf043a0341204906a00d8d2bb..efd365225b292a8f01a1d18bc2b92414d22d6f0d 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a6ee0454840e969a76fdcd7bcd7b58892750ad902b14af882983f4bdad789de -size 831433 +oid sha256:a857b1c348cafc1921be37c6da9ffe5f979c78d1e159ecfb978d9b3fdc55bfb2 +size 502007 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png index d207f6a2ba04f1a04f2bc6f78f28d5b19587f171..fbe207c9942d56bd6327f5565a621eab0fd4b486 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ba5f92c526d4f343bb4ba6a14fc5608697f72011b57c06b1f7b949b18222b8d -size 457287 +oid sha256:988b200d5d757084d329461cd51e367e59690397eeb3a0db8918de29207a5c80 +size 787387 diff --git 
a/images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png index c25f0da926237b7ac01f53d97dc9da3e57a84051..c4f9c117eb2c818b064e949b8195e625046d07c5 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a114f68a8484a22b51cea7716487bdbc3b59883e4583d77d7359bc2dc4db60c5 -size 815744 +oid sha256:62574e078c75e4d85f5f5caf6f8db8b05a61b14aa94d96cd08e81ec826fef333 +size 816723 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png index 943e6b182150a1f407c47783a3b3fac24b0e2166..fe21ef11cf6e361a976a3691a999b40a6c1993ea 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc458277446c5c7b5d1def15d260a3854080b240f89d9c772c9587a3855aad0d -size 806364 +oid sha256:dae18bd258372afa9ec027ad19fe0362ea2f2a6d49834c4224986075f7e1bfad +size 950570 diff --git a/images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png b/images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png index e9e6b6bb8ef0053e967182a2ba4ef8cd0e3d3b0a..4ab2e02c6b945b49d56a105791b8c34a9b89a52e 100644 --- a/images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png +++ b/images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46c20c5f95e36310c59ce43017e9bf3ddc2be1ee0634ca277c4531aec2ef897b -size 897361 +oid sha256:35b4d44bb891eabd9c25e4c1780fcbc071713277ff91132ea390850651b238a9 +size 629995 diff --git a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png index f993a2299b32836bf92cd4b2b69a53728cb17947..1ca991c4ec45272f4fd754163815962bdf4d46f1 100644 --- a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png +++ b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06f31a36e600e0640cd5e432c62f2bd71af2cabb0ce38fee7b72a6eb9d774d37 -size 778818 +oid sha256:d4b65ed5e39b488a5f1313b7deed68b88a7651fa688e0b5aaa1e38c849fbf92f +size 759347 diff --git a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png index f40fabfc09b3c017a39590bb036b4b7e0b69a74c..e4dceea3bc999a309cd53c1948a6a1c15046dcfa 100644 --- a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png +++ b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8057553db72ec25990519d5f99c7118ec910a2edd1f0db718cf2ac3931a4036a -size 787427 +oid sha256:83c17d54f392039e3f64dad365b285158e72274c7f2b8289147f1a1b8bde29d1 +size 588140 diff --git 
a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png index 0969934b43249971130db1c3d5ab4b1c2a6781e6..8b30e5b8100840a294a470e0de0a73da108b00e3 100644 --- a/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png +++ b/images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7450783a19534fb3756d23fd2ac80880c296b0c730632513d42c11f8555f233c -size 1070340 +oid sha256:0f12a246788ea3b8d754701b0de7a6f4116a26de43165e1adc8baf794cccd33c +size 1006753 diff --git a/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png b/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png index f187e1bedfe7d6cf9a15f83fbe41fea531f03658..23fdffcb3bdcd93112a4f698d6b9da4aa6f55df0 100644 --- a/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png +++ b/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccfd74f9346c0a2cc6a9e2070bc454f117eaea86d06a72eec5759a895321f082 -size 1555036 +oid sha256:d425d6ff0aac4c5929a6b9efb2eebb0ebb46ce36f312e81e657473214e6b4282 +size 1856927 diff --git a/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png b/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png index adc684e15614719cd9f734db9cd63b06e5a41e52..7a3ca5a07303a2240540c2103f66dbb04b9c7db9 100644 --- a/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png +++ b/images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43e625da8249ce7612f8c5c6c4e0bf1fd881fd1520dd3e37372d212bcc634e77 -size 1534508 +oid sha256:cbb9bdecb9810be972c8656c7ca267c79a694a7193937819cb551c5e7990080c +size 1535411 diff --git a/images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png b/images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png index 1a57bba46b9c9445ca6bb3df97e488d245155f35..e881f832565a8fedf6a324073d490c5b1c71ac63 100644 --- a/images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png +++ b/images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b6d890f273ca054f351ec9c0c0b91fa01e7834f8a82a81a77d9d13fce9a551b -size 973292 +oid sha256:160fc5e6cd88578298923b3685969c49f86b57c3190684edfdd4c6e5a495a699 +size 937545 diff --git a/images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png b/images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png index 0abfd3b3f91c04c8fac5a9eea5a9f1d34fecfbba..c7d9710c2c0419b678f4cb59fc566fa3d7f7bb36 100644 --- a/images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png +++ b/images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4162a2c8d8b11b04b8a9c82cecde625bcd1ccbfeb247353728321d882f96ea23 -size 823346 +oid sha256:c5b90db21a6f183b5c8f3561bf431d5a283461b745af4e9af99a5fe0d2c38c82 +size 540544 diff --git 
a/images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png b/images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png index 97a3bd2f1e6dc33aa9854d5dd39b6e3226825d1f..465f1fae96ae129ae2227dde1c795f7bde1ad626 100644 --- a/images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png +++ b/images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e5d941fe2cf67eae69286bda0ae4036eb1bb94115dac3f7a6dc55f1997eabe8 -size 2987728 +oid sha256:449e35f7a1fe80b2a8e26c0e6a797b9602f6b4037dd6c6db9b52705ad89d3e49 +size 1008359 diff --git a/images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png b/images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png index 5a4c3606b39beecca9b9ebc9dcfe19778fac650f..472fe360b4c9f324b2f554b9b0e72e61684be95b 100644 --- a/images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png +++ b/images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d37be5d18775330329629f4b363b22906e73157bf305a0446cd740940d6afe08 -size 1533007 +oid sha256:8274a2a93b1861366204c6fc0a9864723b4ef4ec23af99e62186085841a8d794 +size 1636112 diff --git a/images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png b/images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png index d0ac715db3e078a5ce20954551dcb8152b5d0856..9da5629c1e8014017123c0ca41382251b89cc744 100644 --- a/images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png +++ b/images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:546b6f0a360be062fb3322660838676c98614c8e646f7c357b77ef2c45943e22 -size 954555 +oid sha256:9a3131fc887eb3dab307f0b619bd2b04d8bada6efee10b35af3719dfedc2937e +size 703390 diff --git a/images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png b/images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png index 4e290877f8fda68a9de3f3bd9a3c1f34bb341853..1873b7ab821d749235e55a3f17c24fd6ef4097db 100644 --- a/images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png +++ b/images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d415e5b43f3eb4f2908974b4f73bba1b1b17e0a9bdb5a233f14717f3156974b5 -size 1242891 +oid sha256:88796a90e75efae29a7390d69f3d2ef53bcf79016134524d544818ba42086149 +size 1183614 diff --git a/images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png b/images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png index 6d523562d33501638e07657ac56b1be50590d343..dae90c16b6bc144ad2a8d0953624a66a5b56f9a1 100644 --- a/images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png +++ b/images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:362ff326b8fea60b36b49d085b0afdfb0a6e5f361fdab69a2ef63d961fbbb0d2 -size 1179541 +oid sha256:cad2fe369ea12f1a374c616de4c3d9f1fdde873965f885b8be965e73f6aae8df +size 1988318 diff --git 
a/images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png b/images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png index 898cfff9f74d3d3ba6ddadd8a43a485985cc4d82..6a210330e7a57152616d5da8f3bcaf8890522253 100644 --- a/images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png +++ b/images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74cae6faa1b3d106b39ee558d89588a4ee9cc7c90f2c64b67327acbdf85dce9e -size 1231129 +oid sha256:1285d79ddc0e8b0429cf27542fd4893a080d5e9c8a416b65508e8b63ec6a7edc +size 1725539 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png index 611f9e93c8213fa8cf84c7a5d1a74b795773bca2..fedd9a63c0d2b6d12fe36d455f2180cb6d7fe45f 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a29653416b4d7ae47e905dc5e9e4bed44a91746f1d4e0cda823bace5c5f8c490 -size 1278103 +oid sha256:3351a8815ada1594e43b6620a65aeba765e85ba00f22848344064c4a2dedcec6 +size 1307109 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png index 5ed8d9c7a58ea8c6e44d8e1e8ea072a39bc46ab8..478e697fbb2703b7b093b4d8a2d686abead5dc39 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5aecab5153c5bf99d9abb2e0ba2dc3aa2aea2529a27bc620d1f474dbcec15f65 -size 1140727 +oid sha256:8111172344df60b3be1eeddc7f929bde3a8ac5a24ea27661775caa5e1314b5c0 +size 967546 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png index 133beae1abe1ec82bc763f2608f38efa38a3ce51..cb8797d2012e05f0b1c6251d3fdef53aa8a18b8a 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7b5f8fcdea1b666d746a1cb28a019f80a3877ac3dd6bde8e7880ca89c8fbd83 -size 1324225 +oid sha256:6872119c540d1fec375d3606f18e6c955692ad4306a65b8c6a358e101a87f6aa +size 1277097 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png index 2420fcaff170b749de1aebde99e98679dc4979de..f899823c3c241a6690135979203219a557642a41 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25e31bf0e153512bf758860bfaa1f5dc448bda6bbb95fd5b612343fc1833bc5a -size 1294520 +oid sha256:952a5969fb47589dc0be8529c284e892f8039419a1c97061af34db075659a936 +size 956939 diff --git 
a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png index 104d9b9e6c789a6e8431e86d68456a3c06ed7e01..c1d3370ec16db0693d99790c38aa00fcf3dac108 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:992562aa212b6729bc99aef5f8c367d6938f975d621a677346460f84b606a35f -size 1279134 +oid sha256:cc194fd315d213133929e6fbedb54394399e228a67d292ab1e7c2d2c322f83e4 +size 800472 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png index 29cb8df4a2e26687c9f4493b86f0c32465d8f9d6..60128d66542f1c9ec286358a59fe2136afd60df1 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d33fa9e7d6db4e151cd826926527a5c53650e9c19945ceca422daaaf5106bb4d -size 1428912 +oid sha256:5909b34fb6fc264114c8e3d8565daa865c2592fdac96f89f1e560614ab417db1 +size 1421716 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png index 91712a2ccf96fff2a1e596a554833b5925cba27b..c8d5c83097fe7c6f77cc90d680d0b4d8126e2dff 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1dd07bf2f23062b361a8614344dbce2417a2342c60721ac3d3f5080dd6ca0b6 -size 1391013 +oid sha256:46244084c036da4f7098ca9a767fe2d6347957dc8da380afac96cd43ece293ae +size 750801 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png index c4fbb160a58d06eae07035ca7363a4e289348cf0..7ff55e0e777214dc91a0fbe8245d7c7733b3aed3 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4aee8f0206f71152700a4162b94c1cae4774c62710b4e8b4c1f0c551d51df6be -size 2548843 +oid sha256:f330ddbbef9274011f008591d7912ea9a06362552e703e2424cf722493a64b16 +size 1605568 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png index 6db923ea49a50f31ebec49e54c12fdd6ccd6d304..f5762dce23d9727d8de5cf240df40c1226b920c3 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37b544f763a394c4b1f6c78824d0bfbe2740d0a46955dffef45e7d7a0947d156 -size 1428492 +oid sha256:83bac1ae1b6ff5b6842da0fcfd9a7dd256178012643b4254e6d9037abd6b229b +size 1486757 diff --git 
a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png index 54f16f80e9cf2497d09255402d2ddc0b123354e2..7353629b46f9a461473f2d9235e63e4d9d62b9d0 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c616c323c970e7d57cfac8d18f9c48781653e2bfae10597c4cb9c66f9574115 -size 1342288 +oid sha256:b6f6403ae018c177dd6209e6a479d0017113de6654c61564db95d1b2ff82ab46 +size 1239998 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png index 5c2d4aff97b6eb3731d920c92752f17402a68c7e..703f70b9db93d5336f4c87aca61f7f1250fd4f1c 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dfdc4abc73b9908e235c7c5346768c3736ded53869bbe330dc56a5fa2410133a -size 1451191 +oid sha256:5b58d9e43bd5db01451466c8a0feebec8a8999c5a06fdbfcbbce134ffe1225c4 +size 871232 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png index c43cb7f6d883ee3fe3569b567ac54ed916d4d729..92bcfd1538219227cb66e7c279c3edca73aafb07 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3bdd4eff78b656b950c7219833c17b217a78c24ae995e83977cf6038a487a11 -size 1168703 +oid sha256:37909a8f333a4382ef2ebb8ac907ee793de0bb748a8417b472ebf5e931de6cc6 +size 708672 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png index bf9dad39953fc52b82339b06db98bc66336735fa..8a81a9b77b206dfe8f6f98dccef48ad1c980e12a 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19fd2b5ddcd880ba4949beda0d4cdb1be04c16a3cb38919d727dd210b3504e69 -size 1328270 +oid sha256:d7e04746c869963134ca1faed1631f27962bf7787217bbdd5441ac72daec95a1 +size 1176455 diff --git a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png index 063bf679b571d29164fd4a9beb0a34bfc98fdb25..69e426dd96dcf93c5a8387110a8753869939d20b 100644 --- a/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png +++ b/images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:876df4e98d8d19efb883ac2bdcc22a04dd641275a0d8446f21cba0ba9cd14f83 -size 1392004 +oid sha256:9130fa9e239a061b8dbf4d57122673eee248d1bd52a5130b66565ce432f086f9 +size 1158650 diff --git 
a/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png b/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png index 837c014de66506b3618b78286dbef02c909c01f6..20cf1798812602b38ba180a611034ad621a36cd9 100644 --- a/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png +++ b/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec92955a1117fe69096fb6629b671d24e275085117cc68d530a5a44a3246a486 -size 895512 +oid sha256:e4cd304195a0962bdc1529fc69fe741e16031ca5dd9fdd31d61d2f3b4e092f60 +size 723495 diff --git a/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png b/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png index 9222041721f4949d3896477672887e8e9955c991..5cfe9c75849cff140be9e385e922315478e15b3b 100644 --- a/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png +++ b/images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48674de63b3a2e7490a953c06c6fa94c7f38591e2737d50a9cd54b63f51cc1fc -size 1019509 +oid sha256:bcc859b58a3584895a2a9b56cb5a0baad0f42aabf8f86b4d2a70d28042e5ac39 +size 578962 diff --git a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png index 1fefb3edc789e3ef21da1dddc06e91ca4e90d7c3..a87cbb1ea43d1c45410b42ea7f8423c43e45ef58 100644 --- a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png +++ b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f83ebbdc8cbc28447fb15a94ec6785bd9d463fc5b401023fa26321b0bc18ce5 -size 1303412 +oid sha256:63f74d7624d5e20cae8c1ea0be5ad096c70d9b62bf2b367913f214a10c07cfce +size 782915 diff --git a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png index 1fefb3edc789e3ef21da1dddc06e91ca4e90d7c3..bb663050d19e8f869fb7ab8ea8e8cb4cea8e5b5d 100644 --- a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png +++ b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f83ebbdc8cbc28447fb15a94ec6785bd9d463fc5b401023fa26321b0bc18ce5 -size 1303412 +oid sha256:4eadb0af0e891192d42533becb77e071462814a8da13c6943e369a6029dba0cc +size 1317552 diff --git a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png index 1fefb3edc789e3ef21da1dddc06e91ca4e90d7c3..3a65463f32db84a32c4d4295f71c4bbbbcbcea13 100644 --- a/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png +++ b/images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f83ebbdc8cbc28447fb15a94ec6785bd9d463fc5b401023fa26321b0bc18ce5 -size 1303412 +oid sha256:63502ca1677b4c87a986105aada3d1d4ccaab29d4c2ab44df1653f4bbd78ef27 +size 556876 diff --git 
a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png index 4510b428b3bccbcc239db40cb1551c0adb8542a8..6d831b35597db6cf5c4cf7c7fa91446c41814e39 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1304460dcb1a84025dfefd715f82399d776eb439a45d3a753e6c8930d6a8f5a6 -size 973184 +oid sha256:d325d5fa502e01da02310320e9ab9f93b6ae45991b20f175d96f2b156fa65054 +size 908645 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png index f85b4611fc79b3b8c574b359581eda67e9205e22..cbf8675fa200aedbaf5bb72b946eb1c39965ec03 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5db2b5ae8bc442e3c07dc03b514555884c1dec1a5e857ac0ce597f3d66bed348 -size 587497 +oid sha256:fcbff0967dba40275141cfc4e308463de85245b4e15b249d095a841f5b8d782e +size 1034438 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png index 606a9d5ba666b7939d93053e35b9f572171404d3..7578e43a9d7807f10a639713da387892a8532ab4 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf9fe2072337b624a96331e09de9a3687fa881851144b855e72e34f86ea9dbc2 -size 580998 +oid sha256:b1d5860bfddb72ad6a9ad5c4f2293bfd3175ae9b1a1fc0ff159c8f6a4b7a8e1a +size 503756 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png index 1cd156709d014cc8ccca8e20aca8c336b6693746..7e6efc8236be64c5bcf09f703c4fbab7ee7b8251 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:587dabc9466a40d3f6099d39bb77307c82c91e980cefb9467ccfc77c00c6174b -size 733221 +oid sha256:fc96d1614d73b86735de3a51325123bd49cda08e522c1ddb35d930114c80a446 +size 1411486 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png index 44a9dbb6609b0309440af899f99e70f3de34b726..c7663c2d803b611a5ec056882ae5d9e67cabd7d2 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2202c0d440d2d89ea141a9a920f57a94b69be4ec2413bc3e9bc5c64782310f84 -size 645506 +oid sha256:356e6815b979198e854b6ec5d1b85106ff8aae8e8da3b246b109a0d1b8ab74e2 +size 1142363 diff --git 
a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png index 26f0691c4789bb105674ec79a96f3409e9c4bb7b..dc4e410e2887ba6640faf31d7df7e0381d4c47a7 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a228cbefa0282e60c51e561cd655c3d23da1afdafe18e557f81c5f89bcb518e8 -size 922437 +oid sha256:c651cfa99fcb09a9ea6b1194da8a303bcf680ebda5e0c5e2f76f3d396ba19496 +size 1136860 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png index 4312f78823b6c04691db52a24187fc3f187a955e..62d4ca342fb5cd6949c6ade0e111ca1fdb602ca4 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:494581e8ee8f53542a7917c8703427c75b732de34cc5e006fe1bf7e81c4278e7 -size 1095385 +oid sha256:91943efd346871104a7a171d298e62150c160883dfa91c5b266872c8765ec712 +size 1403990 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png index ad309c7ad2f07602e97558d0705042004002cf32..418f0e2dbba72f022696e82a61041e9775661f68 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71b8a48e22c8d7cedebedf96edca0a9a5af509af77e9ec255779b2766994fd4f -size 1131350 +oid sha256:5b70078d55b27634d05000a05e28663d00711d52428e9e1cdba5e46aad93247e +size 1315459 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png index 5ad9b696c1a96185c506881c467231ed0b2ceea1..c249b9de12fd75eba0469c18cc4ac8e35182980f 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f6f81118697140c392b560663d9d630c2caa72a67ca910aded8fefea5c52165 -size 1151094 +oid sha256:1749b83fca973dc5344f518544eb69adcd540400b619c0d40253ea75416d3276 +size 1275408 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png index 6e372faaffbfb97618f9c34d620e0dcc33c98e29..96c7eadcdc43328029d1f3e83d896aeac7bc04f4 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dc971c1964df6772422792fd6e8b455a325f2b05b318a64d18833c0a324b85b -size 862364 +oid sha256:15b396f454075e7982c59a099d783361c20335210e14ff851f82b9e13921fe34 +size 613044 diff --git 
a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png index e80b8061207b112c1d98d5d290d022224861d43c..761a7f23dc879b4204cb14d6d5a8c2b317799260 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2da667478f637020febbcd3787601a679d673c6e730a39d3c099a01150b57d85 -size 870691 +oid sha256:d1a8e08519ec004c26474051e496477f950be8d547e6e1675ab08ce1e0d929f8 +size 1083204 diff --git a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png index a771aa96ff7e951793b617d8c320348d9906f694..68e4921d3ff5f09ba67ed0013f818787161bc3c2 100644 --- a/images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png +++ b/images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:531d1a2ea90100e88c9d6ff3188a9ac26cdfb4b637d949581282acb19ea7a182 -size 876826 +oid sha256:669679ec816bd6d9087a3968afcc050362f726b955006e40b4271aac80d19cec +size 1062847 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png index b976492c87bb495ef0a2a9550b5eebd15738f516..ace142acda8bcc5bda41d5433e0048b7fec3ca61 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7f2d52705ea585c2b73c3e216c9c5d5389714bb64959d53609d02c8ecfd344c -size 803990 +oid sha256:07c4bc12dd86a96be718831382d40db07fc6d0adca18f5c671aca596b5f0765e +size 1007067 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png index f41803b7139b041630ca9585cc8dd43f5dd6616d..49b5619970c413e8efa52729047cee884ed2b23a 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39bf485eca14547f271eb5b0cada772e153245c8769d86384b1ce43a4629da4b -size 852123 +oid sha256:9cd61cd759f3830e99a976f56b02ba648abc1d4f0b4a51ae8fe0d8a9c27a96b0 +size 720187 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png index ca2970f30f2d00ec440db9c0ebf6ce24e4a37e54..800b9a2520932beedc15d3e115bb5326994e6773 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fa0e4a2aa79d1f98111f521e70fe7f0f125af05618eb5c729f8f60d65ec230d -size 944135 +oid sha256:d895372e773a88225df5b3478e186b8635d3adf7edee907372a110aef4be1e6e +size 981989 diff --git 
a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png index 43cbe7701e13ed7f8642499f16448fa88bbe082e..f89e924a7ea9ebda253b12aa980b215d0e7c46ec 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32534b5bb71a8b27a554833fe0cca474ebfcab1b3c84cec9a927f32d68345bea -size 760077 +oid sha256:056057052a9d21fc4a32b056f307b84ebd807338d6acdf82d5d1019b9b500dbe +size 415175 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png index 81e24f0d733361a80e845dc47efeec23d818167d..fb71ac7fae40771a4a5e7d1e70300e2dedebe6b0 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef3ba749897db8824d1beb67e1857fd4816eed4bf75a1f768f245d52e3deeed5 -size 1571100 +oid sha256:fa9ea9d0a7670f63389382652bda85a275533f070587161d3a8783d43dbf279c +size 1428770 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png index dbfef2ec4632952f9be3d17e1370ff0313af00c8..3ae41e7818c8b2f4994f5cbc228676a9ccbd341f 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:036f25cddd68503ed1195652e1ee5167589bff55f5b7e8369c793102f3ec0151 -size 842725 +oid sha256:e77043f77aa732e2f50961f726c4196e321765234fda259091a429bb238b0f3b +size 1266180 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png index a99a8bb7721ebc3846cbddeb3516596d9fd33406..2875fb07ce70fea0a287a7f8285231de68e37c53 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0dd7098052f7274662a7abc7b2079474316b572587cc21ca5a61253881dc77a6 -size 494251 +oid sha256:fb3d4fa5f88634080ff99a5845fae996410eac588862416bbc9b5e02ec8d996b +size 574978 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png index 826646b0941716552733f1597367f72bb457978a..b131ed757435d47dc6c251137351b344603530a8 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8035433d640739b82022dc55f8ace00487318e6890f10178df6f0f344acf72d -size 766356 +oid sha256:eb5872e7e38e40af89790dd7290bdb3fe26d6e5e0467f4068a80610cf67084db +size 1379352 diff --git 
a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png index 58ece3e5b2a4276a7c958ca321175eb529156892..72af3ada864bb3ebf6d1a5ed684175daf116a473 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b5acab65f7cff14d67712da9dd1c1db555400d9b9cabd6230a7451e8bd6d4d8 -size 1467551 +oid sha256:284266265408338adfef01ede2447d7ef9a4fa122b3635e619356c41333db147 +size 1497831 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png index f099e7ef5628572cad092f1115f65952b01107fd..86f151ebb35c1379c27b99ac81f3b95337ca700a 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dd944530c04945024a80f7d61eed10c9c0f8c9f2bce2dc21eba42b764e29c6c -size 748819 +oid sha256:8cef1342f319169650a158d524609c6166741567587f2f32a3edfd5e6e992fa3 +size 1452782 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png index 32b77c57bb205bf83dd1ae55cabbee0c347a769f..7259132cea4341138f6147c64aab96d5cc4e1379 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffda6fd53fbf3fbe12e7909a3dd0936c091924d0c9e246ca3acc2b898ef5912f -size 747803 +oid sha256:0d9698a48c85bd81230afba51df8132a5d7ee08e585d8c879d6b6d3cc82f837d +size 435221 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png index 5b8e5e5a8cd9a7aa48c91e22e98b40023d436570..831950ca5378a40be918c8c4fb0ab9e6fbd5e4a7 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82b95789dd065b61ee629198713422a1e4c1fe374d87948c98db2e6ac48ed36d -size 763909 +oid sha256:da702b61df264bc76f556efb5c9fbd8312461959cab67ee7002a296eb7888873 +size 1319235 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png index 0f9624e69faa41b35ae7b0623c908fc8141b74b7..2ed37e9c35ba12ea3b33c102cd966ddb97b5208e 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84639ae9f37ddc8ded65fe9fdce68c1c8b08a35c0d59150313e17a6183b973cd -size 774478 +oid sha256:726bef2ac7de267c54223433d3bb2cd4c8e9c57f52b54a88e0b4d923f207db71 +size 938484 diff --git 
a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png index 93fedfd7af120013c24b265c38ef0c60f589c06a..10009098f1cff0edf0289cc0de7a8b14873f0678 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97bc47bf75dd90fee0fdd289c091584a37bfa661acf03b4c69d846327bb851c3 -size 750056 +oid sha256:3fb034e9258cb4713103a3772716b0f1ada97ed615fb62e8201b00d326b2277c +size 1459056 diff --git a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png index 540205f7f98232052cf795cf09ece373ec0fc192..77ac05bdd26f82cec98b59dcfa06ef704d027cae 100644 --- a/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png +++ b/images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f563278f9bda0e04205c65a3f78bbf3f0041ad8ec3b682ebeb06736860b9d6ee -size 828218 +oid sha256:7997e6def95584972ee0d597b99183f5103aae9a96f4308050885ccd7ba920a6 +size 971233 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png index 8f8eebdebaf43526ff5abee1275d7d4f0f9d9fd4..295b9c5312475e0d59c22ba700d7fee4fbd2711d 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33599078f31aafdcb628315a41501bac5541cfdf6191e84e672b7817d1880f37 -size 2078865 +oid sha256:06e3908399348b803e7d11b2f67133efb3519eb4f98da2dad0c4bf62d8bc6ecc +size 2007902 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png index 4dbf4e626cf05b509b356809d0b44060560ec7cf..99b80d027f42915b7ac58bcdc0b187761c6040e5 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bde433eb1357409b8a421a29fea6a0cacf891ccfda3a74b4436c911dd8895d8 -size 1690206 +oid sha256:affba5f94afeeb6ccd6778fe18b44d1bc95a288f5ed28b6649873cfe3e52b663 +size 1411355 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png index 6cf4fd310da1ed92a4dd6374e1505813a540f2fc..9fcfa4c30b7859de7ed02de7e86e56d8711fa4f4 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b898270561beca616b7a12acd6886532e3b2d98d790f997a744933646b5da6e -size 1339352 +oid sha256:8f512fbe2a7c6f536959ab7c5cccd0c9d6f9ef2cd7e578ac28cbf5c844bd4066 +size 1493685 diff --git 
a/images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png index 5bf74b3402766e751f75240f98245640c9ac17d3..50d08ab32aeaebb989122c08152b5410d3402b9e 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abee6dbbcf969ad43de377d5f4d309a2a17cacf4da3cdd820dc931afff622810 -size 1563611 +oid sha256:b6b5f72cef230e119d6548ac96fd89f15aa21a1d314f6ed54bce717189f33667 +size 1435692 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png index 6cf4fd310da1ed92a4dd6374e1505813a540f2fc..c18e94224063ce781990ecd03c6e00d2a4ac8109 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b898270561beca616b7a12acd6886532e3b2d98d790f997a744933646b5da6e -size 1339352 +oid sha256:c771c44c948210147d47f7548c82d9bb078e6da9e7696d8d503604f5ded801ae +size 2317838 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png index 1223d00c06426fb07ee957477273c19fa04dbb53..d7831c9d8509433cb23f2d384e2417a67c52100d 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b35a4ccf7daed0987c4bee763fba034c1e9cd318e1ba2097f720e9642fe2e68 -size 1453666 +oid sha256:cf921bf12f5299e2cd07a9d885e7e3104f038ea692aa5c5eeddc39b797396376 +size 2552869 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png index 2e48b220765888504c85f6e51caa629f9f71f2dd..d5c5cfa1aaa6099fa2c55bc683a6168e104d5a02 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c3e66d9b0648c5a0a2aceeb09e744fcfa5da89d41d14bebb4515a58e13146cd -size 1476820 +oid sha256:6112844669b115fc136f3014246c5bfcd6958e0867bb632d7dc7856ed7f3036b +size 1977985 diff --git a/images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png b/images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png index 83a39698d4d47787c5778f23a16fd89b28d97c75..ec767553776bd528ba58ae01c8025ed26dc55eeb 100644 --- a/images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png +++ b/images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:906d0af1028193dbb73dfd5f1868d6254fdd360d3a34dda1d8c6816bffb5efd0 -size 1818153 +oid sha256:5707a17906060ae817495754b05c036cd2cbb8759e2b171bea90e80456912cd9 +size 2865926 diff --git 
a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png index 41947f35a01c01be781e94e493e137803505e050..5dc0c8d6ed2435f189e3ceb4bd99173b5239af85 100644 --- a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png +++ b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddbf1e225ec4ce3f4e7dbb7b6f8a01bcaab577df65af7cf726972f7180b27a07 -size 2803210 +oid sha256:0bc6f4b842b0148b31bedd65054b9766b8bec53d0a48511f2e5510fd111e2c28 +size 1833354 diff --git a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png index 40cbb9ec699db05fb8769c90d944bafc2c5e415d..42f09700538eb99ce3e49f503e1f01c0c92fef66 100644 --- a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png +++ b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:286e53c4ef610f46d02c97ef26f6931edb396b4c831c16d58602678846ec1698 -size 2946097 +oid sha256:b16fa0dad4b6987690a6c27b78a03675e9921c1e83df49827c7c0176754b96cc +size 2029292 diff --git a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png index 7d30bc6e84b22663021a800bed8e71c314b0364d..cd384c2f08ffb3b937d7f3c191e9fcb7f91ec5dc 100644 --- a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png +++ b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8be8494236528ccdae05ab7b9483441e350d78bd1a70177d5090ae855c2425f2 -size 2689277 +oid sha256:ae74c77f6f16a74e79c6ef3bea0b761d80dbfa25ec568b3dcc339a9cad44a38a +size 1479100 diff --git a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png index c8bbc71c8399687c96f16ebd3059d47d8ccb2950..a08058f096588d7eda089ccc807eab168d1d76a2 100644 --- a/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png +++ b/images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ffa324b2352a5cf7c51b631eddb818abd50a4b79b19a90913805c3b146e653b -size 3762324 +oid sha256:7ecd2cfcc590ffafcec3da3e1f68a04feb1277c216d259f13c1baaa2c1eb289e +size 1979248 diff --git a/images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png b/images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png index 270c98ddd923c9f3b7f5a0767b4fb4846fad1c35..defe721d7b1df78652d9fdb2682b54cf5a12aa28 100644 --- a/images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png +++ b/images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a48a8347465b5312f0a4e7f0deedcc2d40e17b1d00f7a3289c340628e5ac119e -size 955177 +oid sha256:a31cfa83685bf64cd404553f1069be624dd43d50eb43463286b8f0f9abe94038 +size 1672012 diff --git 
a/images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png b/images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png index 9c129f86b2dedf16d454dde8b26cea691fe0f39b..dd32c3cf50db40b845f2c2c997f58c683152d562 100644 --- a/images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png +++ b/images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ba5317560988e3ec2424785aa4fc3881075e79864324cc1ca779098818e44c5 -size 1357481 +oid sha256:03fbc5fe66c44d4e841a572f16995a3c76d17f861e5c87d5215158119db30945 +size 1123878 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png index 516166f8568e2b87c0f40f73072d1131c7feadf7..9fb30022d85449950258efb590cfbda1523e8c25 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf7cd3633d5ad3dae75d3afaf8e7028f41b1623465cc9bbad6ea5f6f6043f1b2 -size 577558 +oid sha256:8977dafa2a89fd461b22a63194fa2360d09274c4a6329d1c6099812d9cb7105d +size 543539 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png index 0db44d17f8520c576c3594b79cf39b039f3fa2f8..7d376a27bb638e45decb2dc68ecee6a0928eab71 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78536c2fa544a3c8081c70acaeb3f1d8811aa7c4a41afc11e2d7b7c99b865f0a -size 621008 +oid sha256:5be03d2aed9c289e7ef62ef715e56743285247779b81077e57762aeecbbe0eb5 +size 849703 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png index 8ed2a2abd05f700949ee007e4c04a522da59ee4e..fbc955bd2d331cfc62f9503f5e063f26b1315cd4 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b43cd761d14624ee575bb6169170eb799a1280c60220a44678fecf9e1ab0331 -size 520492 +oid sha256:b19194de092da7fdea72ebcd14425319e09473c87de469ff52f8d06ea476a792 +size 455523 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png index 4a473cc08f1463db9a5ca3a0f9eb69dd2c33dece..78b8fdf8cc8991d2cbf22a5abf3d4c6951c645cb 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0310e02a847d8bb2290664ace5b9f44929181c68c6d0aab7be9cc2b7ad0b528 -size 929974 +oid sha256:4ef255080ea477c1df103555d0ce926743cd23784dee367dd70071c415197ffa +size 843428 diff --git 
a/images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png index 3000adcbf1fb6a3e9246fc17b498fe5067a3e5d7..5d998c33612e68d4a1218acd88a1575a458b6ae9 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc1a9f4533759531a01adbcb95f284dea09c666846b36a2840334b70668bffd9 -size 554106 +oid sha256:2f9ea769a3e8a54fb93d99436c13dc754718c6596d4a998af2648841a5d83318 +size 669234 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png index 5880b434153caa0a92d65e5d41429b51ba35748a..35cf1edaf3d419f274e173bc7043ade1b657deec 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51a8f2b970149295eb4e9f7aea275e863a34ed6b31f97620e1bc63eb1c6922b8 -size 1125320 +oid sha256:3cf144103e05c86bd3b31543bbed1378c72c7c1e86fd2080118f1bcb800a18a0 +size 1089926 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png index a70ef5458beb374c7c920f4c5d63c411ce30fdc9..496795f0664e336265e704cadf640a3ce9707296 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac24229b7bbb763b9396b43387396fb5f8b8f2e9aefa97e94fd21fc2a47fca71 -size 684191 +oid sha256:bc0ec0830c179d7935859f0ec5036b681671a822f9c4b9d87b775a6d21167b0d +size 792552 diff --git a/images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png b/images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png index 322aea2cc79d07f922533c6af7b22c0319061692..a2fd51692581a819d2ae5ed0d36b74c513bbb4a7 100644 --- a/images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png +++ b/images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0551d6661cb93adfe3325d1c07a6a446e02869e5934413a99bee9f3023cc97e -size 949584 +oid sha256:581ea49725cb57a03153b8eefea23d59897fdc4d8e9e43b5dcf8d8e051908051 +size 684887 diff --git a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png index 5e9c5e9c7070d6c11e7fb5a2bad940878d0f6758..8155856637283178f4161bf864ceec3b43100b8d 100644 --- a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png +++ b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:419c842dd6d120adcdcc66c071636dccb2fc953a01d8b0c2630da12623a66b71 -size 730332 +oid sha256:0d7de939fab35e2fef123c4191ec9c1419440c9c02eeaccd55b8de635c1f58ab +size 1020063 diff --git 
a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png index 3ef5958b1bf4bdc0059afd70765d20f0554add0c..cf23fd8c6d6d3781b2ef9bf0e36914d4167f2982 100644 --- a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png +++ b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea7a32aca373b30220b307f5e2f06ce2ac30d813cbc537d1bdb4c8891b8c31c8 -size 1102115 +oid sha256:f2f122cd8008b6f179bbe5e1cc1c5b6c58333c86860ba025ce9f1b6136ec1b88 +size 869434 diff --git a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png index bbc57639c695f630e8f0f27c83c7ce0d1fcc9760..6e4806e25c6ca1df540f04b41ead4c8db1f047b8 100644 --- a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png +++ b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec25cefc10904e695786176111b383bc63f64d38a3181f99ab133c3b80dafc1c -size 1328168 +oid sha256:ca2af9507d1cf1029393cfae9785f7323ea097e115aa728bbeff0e0e355a3331 +size 1068590 diff --git a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png index a18efe4cae6b2e9fedb7363dc22bbb29979b3e8e..15b68ec5b705a7e3881e224ce9d9bcb5bc0f7bc5 100644 --- a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png +++ b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8185febc7890027fc966b20f26908b4af14484e0b22785c1872901530b6071aa -size 747198 +oid sha256:708313ab109bcdabd143f46c1788b6f0ed7b270644fa5ad70d6de3aa4cfe7970 +size 702898 diff --git a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png index a3279119e29c90f776f4d5e06523d8d7b818d127..e07117b497933a2985844b8f162de1f4fa50c05d 100644 --- a/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png +++ b/images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac32c5f4306d2ffe1fcc38560f3f379d9c6eddaa32aaa51830f0a076c9b99001 -size 848339 +oid sha256:9a29c3f4ac492d1ee385769746fb5a64b4b11e5bc4c8f5c8323bc78f595af756 +size 697123 diff --git a/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png b/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png index a80369e965b3a3bcefedcc552744b1979f817572..4c0e98c8bed0842c0e88c1f6e3eb57e7aa6b8bd8 100644 --- a/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png +++ b/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4051a2ee75770d2cc00725752cce2be8da5052dcf371341ad8dd129d128f2813 -size 1277438 +oid sha256:477afe643c288fcdfa12820e4bee30f23d83745b68d1e9ed6a95505f2ae24964 +size 1859607 diff --git 
a/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png b/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png index 963e87502adba5ef059ecc2530374bc4e903322d..1d1be6ee0e4179080d824dda45df99f285c059b4 100644 --- a/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png +++ b/images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54ba960384156d94c15db7af623d8881c2e9e42cd4c10fd725a0f59007b80387 -size 1026881 +oid sha256:cc8e86958f449abf39d4667fc90072664a347cb69ee35d63d7771f4430a0f41a +size 532344 diff --git a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png index 53b46ab0be71c449e04eede20d940a64bb62df9b..2cb51dc22ee5fc029364c1a717e928850c7d377f 100644 --- a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png +++ b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7145ee16835436f196896b93f391857aeea59a98c504e8e7f43b59eb9ba1fe18 -size 1182681 +oid sha256:da9a41b93139f59e02d9fddddd1d8bde60cfae186c153ae5a0fb4abc3f3f30fd +size 751076 diff --git a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png index a2dce5057e80b7d943ee252b3b0169ceca3d1297..4fb0d475832c932b08542fba7f653a8d907d42bf 100644 --- a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png +++ b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d3a26490d36d9ff042d5ecb4c3006d8cb5bc30051562a4315be647a3df213e3 -size 860706 +oid sha256:c05356c06d6243442dbb1b265c00de579eaba3e44e1101883901a0b203913ac0 +size 1089885 diff --git a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png index c5299387b839a05dd023f4535a15201b26d87352..b6cd4cc234508d9445cd873cb486d1cfd4c4fc94 100644 --- a/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png +++ b/images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:454f3ff7b13757a24ecb30d319c9263b77dd57d516593dbc00e1d90ef980c6e9 -size 1773115 +oid sha256:f83903ec900044c72c9484522ab873bdb457a47cdec4f787b855d35e475d1937 +size 2116324 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png index 394235a9fd840082cf25b6e1fd06e97cc07c63e1..1ab47f2c58c157e0481f53ba81eeda60b43afff0 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:403d4996f94cd74545eb77e2bb8bfc0b6068c788943f888e5acdadd441b0add1 -size 2052731 +oid sha256:ceb9a75dc4dd5cf620e47f29307e51be33caba2cf4c2397b1fb244ecd9b93b59 +size 1602147 diff --git 
a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png index 830d80f2fb8bfcdb72559f43e2c108322a85d566..8df28870ba26c56895326e4aa519f53efd558c8d 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d937fdcc08a7b4a37441e6867f933ee7434691de15362c3b50944b6f351aea7 -size 2192079 +oid sha256:b376edd6c1ff75f9b8bc4a664bdd43147663cd733c8e46cd05f8f63468246e71 +size 1083993 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png index 404fb859967dbd61607c47e01d9b7f1d502be2b5..207ad962aed673283386e5bbcbdf92f88150cf59 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d43ad4f1c4bc6ba230e09d9cdb3a32265ce4cdfc8afa61fc49cb1c35d67aa3ec -size 2103966 +oid sha256:913b4707bc3dc70172c1fc6c2f182ecc2f89803a263470637fbb0a27db5d8474 +size 1222243 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png index e26f9290efe18aa142a2ba0191e00caebe5b6b59..0d003bba3e2f343e4d0b0c242f3c3c03473e930b 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:392d867a93a621c106ffd74f45d3fbed9874559be5e82ad36acb82a5cb7c3d8b -size 1959143 +oid sha256:56d5f262a2e6a0197bd133244edbb96fb6768eaf417ed66b029533793beb7ccc +size 1148851 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png index 092f5bb1e5161d94ba045feda253f64a9a49bbed..241ad98f83dd316e8c040f447fff35a44a7707b9 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a91860d6374e1bd1b3deddb11ba1699dac7e76871c87a05f55e117ef6b2fae5e -size 1924020 +oid sha256:0c9d3b987f095833d1fe93ede6b2ac199d1db9df1ed775184397792ab186c20e +size 799974 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png index 54f4b2b4e72361a493c0827057fd65aca290c3f8..0d92fec50c01682f195be979b775b403164a4837 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b99ccdb20a81b14989d036d6245c7fdb88341cc162425681afa91472bceb1a9 -size 922074 +oid sha256:28b9ac002d3c22a2d3de43d9da13f2d5ad8c3254832feab43207e6b353bb6581 +size 1048217 diff --git 
a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png index b75606352adea96546301df230c9999a46dc25b5..a3ffbc4b7fd63ace95b34289291df307ba7f810d 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8130fe52f77040ebbe90325d472491aa631eb13f18800aa54b75532fec583a2e -size 1636728 +oid sha256:e6494a4711e2f16124a0a3cccef4719d20a62c25a11906140775716e4c772dc1 +size 905797 diff --git a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png index f81bbc72df086a66c449a87cf5b8d1f8345b76f7..56cb8b1e2317ef95427b35c047ef05f962701515 100644 --- a/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png +++ b/images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf3cda3c73231ce9f31b6a114528086cb18baa831b4f689aa36e120c23dfecd7 -size 1668286 +oid sha256:0844f2752d369c2131a8c5042bf1efcceb60d8aed2d38c143fa61f4822eeda35 +size 1186602 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png b/images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png index 4f9e46d68060972bdece0e15a773c3605e071aaf..bc1cc38f36ad8e80b7cd608901ae8be25f088fb4 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e115ec24957829abd3cf56c1ae6eb890dd7d08bedec9ab963b146bfd88e4bd2c -size 513095 +oid sha256:a8ff8f1ec288b4bade072760505fe308e5798297ecf237dd81ef122dc51e1503 +size 1020749 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png b/images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png index d9f45e206266515262753934a228ad62cb31f696..cc42ddc25c0c5ceae829b864087458dcd4b5a404 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b39d15e6ed6b3cb33a5c0fc3457ae62ce8453dce70d58c62bfc99dee489e12b9 -size 759591 +oid sha256:e03c08dcb3480bb89225a28b1fc7c794f552fad9a1656f9c121796f0a3e0848d +size 1139210 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png b/images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png index 50be5302b4fce56cda4d8b0533a980e2de37e8f2..9d6978e119c17b70e5f5136d46e008ffd18d117c 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6be478b64a0e9786f1226b71b751f3b7a2ad0faa4b1f99575e00b5a989e6be37 -size 1546223 +oid sha256:7d22ad6cb091b6e92fb850ef83dd9a08c9831989725b89033b7b8f50a6bfb797 +size 1603245 diff --git 
a/images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png b/images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png index 82d2c09a55389f48bf02a7d352c2fa11e9e602a8..6e24a922135e18e6f177a4839bdd91f83bcc95bd 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee6987f9b8493b6f981df0f23825fc512fc7c923f60965ed5f384384acffd99f -size 3248588 +oid sha256:10866f28b833b17cc9d065f9e10eedd823bb2ed55e27c699a29b1fbc8212056c +size 2408132 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png b/images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png index 6fb649f19be5d1c5b19a495a97e2d2414bea9ca5..5b0361596684c90f21c280b6950f98aab5d29eda 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6618bca534acba2b4af982f740ade85a09dba88fd78fc2500ff49af60fcc75e -size 1056959 +oid sha256:99ec003744a8b6896d2f96e000d72b6a88e2fecffc8e25beda386044f1001ef0 +size 2466467 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png b/images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png index e21d0f341986808d4ffcb02392f185c1c145b341..ba9c0d60ab1a5da6c2894f0b48479f347bd5a16f 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b51c9634b98065062f8d36cb57b1e84456152313a05161669ceaa354d630dae -size 757902 +oid sha256:de67af5c4aa2fc6983173426982bd6fa9014e65dc8ae0521f6d7b0f29d8aa36f +size 804738 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png b/images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png index c70360c8db87e57ef6ba4cbcd80e49f54a2f400f..4e3280fca7b13d28dd5c86c8ca002d2b08f994c8 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:107c339b8dcd53d6390bfcf3cb02b64d56a0e25ccbb202b892c8c235faff7ad7 -size 2361460 +oid sha256:ce31b1c2a8f708619e2ddb278c366f48911464239f61cf4335a7a467eca31216 +size 2074526 diff --git a/images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png b/images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png index 5bef6d8a9dd889c5d2bf2ee0c1e8378908e42da6..1d11987117b9f747305ad1641ce19c5d270239e2 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a6f918924477e5f30ff7187ebc0cbaaa0af585f1c99994cfb90613aa26bb536 -size 3182112 +oid sha256:527610d924501982dfa095961a010dafa80c9a124356ee05013769226a1a382c +size 2278755 diff --git 
a/images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png b/images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png index 63f3a5d64533f5672a71e8fb433384e7942fa111..c427009a2fe1f75793d0a6bb5102f44cd0fdc013 100644 --- a/images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png +++ b/images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:317c32cbb4afe21290c22fe5519dce1797c2d2eaab8fec06e34006e5aba522e2 -size 518737 +oid sha256:e7de1acd976d14965e15d05818f5f9b66bbf2af624a39303741ca0258716826a +size 924138 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png index 56e22162510d57435ba0502809058e7f6384be05..a4949fe0b4a4276752a7a0debc874f15580e8482 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d90b2468faf90bafbd9f7b37c12c4dd7b78ffe234a702aa0f1fe0e8244d4726 -size 1245743 +oid sha256:717ecedb660e69d150857d03bb7974a4a5df94f296139e70c8c9ea6843bd73db +size 993778 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png index 51984b774ab3fa3da80f6349019b03edb2b35ef0..3d4ee5a5c01cb1756128fb3e958f310a667a9be4 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3e6fa8aae54bbb064beb4702e6e80eb075784b5d97ce088f320f45baf3bc018 -size 1225863 +oid sha256:81829bd4ff9858a4d4ca5f35489f0e19867b609757116affce55d8d083d05091 +size 1498622 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png index bdff0bec2a68b51fcbf2f161c0f743f2f381956d..60290262e82397cec33519313b05b0741d4091e5 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15a2d2d808c6f31aa4946e5d99e77bb742a18f07033429b60ad7ab0479cd4744 -size 980989 +oid sha256:f43bffa7f57b9f51ef80f55a3dbdcce18eb6cc73e2173f48ec891cf0eab9d735 +size 834316 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png index 3f2c107bb6b59a35412877fc4486091fb0fe3f36..a7ff28059ac893dd5905a53e1a23dbcfea1edca5 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:060e785303e378f89456dd7dd7f6914e7513f7e1be8292984eb00c44123981b8 -size 614922 +oid sha256:5b12be36b8d9c569fb63f13fe1e412a47785c36187a39d54ecd7a3508af9a397 +size 880937 diff --git 
a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png index cd976206ecdef016e495fdfb2010e0ea04eeeba0..7c629e87087b137f1423b70b21d41b4a0d4116f2 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:723d0e0e438bb12caa909bec876f209b181cc7acd2a4e5064d7572001d0d79b4 -size 845034 +oid sha256:9f572d64ad3c426ba12d37cb6cca001b963ad090ab86897ec97486fa74c96f73 +size 1094231 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png index 5b8caaa8499a014542ecce1e594d177352b9b311..6253f34e0ce3f6c1015d96a719dc1ad74820baeb 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffd9002ec638766eddc9b9762288ac201ef03bb1029e3d444b610e32abda1b3c -size 844047 +oid sha256:14bcd97491b5b77e30bab0d020ee422dabd6a3b62eb691d3c0b0e3ed7fbefb7c +size 878301 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png index 3de8f70f8659bb17b5f0a88b7a4c279c5326792c..927ddc78de0a2e41013179d23c7580c88f522930 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7425633a3703d2ae925833f39afbe5e7a11d3641e481f366dd28a292641471d3 -size 1138927 +oid sha256:096689d21bfaa382328abf2e17bb2ee4fcacbf39f8d4acea865642c632d6aa9a +size 882243 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png index 6b0589aabfa1991bb44285812a8e97c284968766..1660a9ec7e83d379e1a74d226f728485a6dd429a 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f514b02bc8bbbe5263fc997ad3f5c21741c25c7786022a0ddc31f96ef2cf946 -size 900363 +oid sha256:eb10b354caee896b505abbe5233886426d37aa5c8f86a2c87b6a8d41590a9502 +size 874857 diff --git a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png index 5c52af5b6076e51a8aa10debc315f4ddfd51568a..03bd3106218abe249266a05c077c7ba4f31e02f4 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0ba8e9b4568d441b1c844951091e096edaa4f94f094d96aa2d6663b3f085d21 -size 1037433 +oid sha256:9061a743638d96656707a7f3a746982a57b05a50035f01b3501a250cf4922d52 +size 710242 diff --git 
a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png index 9fbb37e353f6e9424de05dc168aa8dd6a8e6ed5a..a64880860803257d9e128e1567fa7c1fb27269f2 100644 --- a/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png +++ b/images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1722d3128371d4f99d01226958472147d44d5106a763d3620068e09c956ae4ee -size 1027874 +oid sha256:826c810575134e6bb04c2319f6e248fffdf0b5b5ac0f6dab1cc78376040ddf2a +size 594630 diff --git a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png index 480019e2c254c30889f4bafdb095478ef1f69614..d68749ea3ab49daae4f728b0d4b3a49d261b5811 100644 --- a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png +++ b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fc8d3b175fae1956b02e51baa3d03bd61bab1f24875592dff2720f6041c7f34 -size 2689392 +oid sha256:c42f33e9fad6c4a8823006d395e5a452d524d69b65734b583b059c5016f09df4 +size 2872468 diff --git a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png index 8e8ba20ac457e365bcacd8f782484f16f7a962be..d6ba9e8d3eff4d06f759bdf3f5d5a3ecb146d241 100644 --- a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png +++ b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:306cc3c161ed9d130ac0090a24cf6ea4dd00b5f5d0e6a0ae3e7f598ad74687fb -size 1285142 +oid sha256:ae84effc1a1b8a47764120254ff9d9e7f07bb9bdbe5312d90d3010ec50540c44 +size 1341829 diff --git a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png index bdcad246f4bfda3164306d006272a48adef5420c..94ec3ce0440c6d20502b9156306ac445a2d8ff22 100644 --- a/images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png +++ b/images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac2d257ad6e1301874628aea79528a29b1dd777501e0c6c6db02f46f1c1f6cf2 -size 10729221 +oid sha256:d94aaf3c0174593cfe0345b69c4e505788c5c52db3159dbee48d9a863cd3d295 +size 3121798 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png index 8f95fe27aaec22dc9c5eb292f411f1a0a99047bb..082c81014c45f2c96bfb2d5f7961ad3b6e0be6ce 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e26fef5846b1cd4b79813d2702c4cd1783bc84342aea6952fb7b32d82380c79 -size 1466263 +oid sha256:5ba9de7431aff8feb811e91e51a58bba680802e84879f6856cec2eccbf3c0ac0 +size 1025907 diff --git 
a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png index 0feb81301f980ef74567b3342efac62b484f1735..b8d514c97675e3241d08893d16b124cfa303ceb6 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5d048694e54d1c066c5c46217a4eaabd94b6e291411b93d823317a1fd679d76 -size 1467190 +oid sha256:807984796ea150ef83749b3e8df701fa8ec2bf0748d7fb796cccdc31656e12d8 +size 1124330 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png index 08f3341689497b2a81d741d12148d763325c98ee..e5f6376096d7ee5e65916c5e87e52399116c866a 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4b9d3f440196dfb555cbf38a691b0eda6d99da6d5a804ba4eb65f9535e3fb7c -size 1475804 +oid sha256:379a83b0c8a3e3af829adb174ae7486cbc59214cd099f22aef1195325d57c5a1 +size 1465148 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png index b5aface644efa6747514828c23118c70b348414e..c8d719b73ad3802fb5e7ee836e0eab1fa5ff5135 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73b972a45661ceb101e7f11da15a980220c22542abb38230b024c36dbe701a72 -size 1488693 +oid sha256:b3ca6294944f77174ee1b8e28388479adb682c80fcb3eef24d9a345ea6d08498 +size 1568460 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png index 10825affbb027a3c97085a06ce8df2d66a82605b..a4b2ba6deea997b88fdea1fa6d054117f5951e7f 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8cde2ab5e6bb08d0074590df8c02e72aa32302dbb78241d736b62065a8fa0f1a -size 1476822 +oid sha256:128c288aa381726e8d70178d6c155c2dca559ff1408f5ad8dacc77b381164b1d +size 1520419 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png index f1372a68076a8296f8fc62ff750622f792a57a39..aa238f62bb850543c42e642a898233ecaedc8ea6 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19bf8cdd22aee93b7c1bd97c365c0a5db0e73eef8e42e44033064b9e3f2757c6 -size 1474233 +oid sha256:4bc84b1b4740e73b8dbd276df9b86ae0df2db9372431d0e30a0d8f88e7ab6a57 +size 1470453 diff --git 
a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png index 7edf5de8d3ec90a8f8c6bd65a23574e67dc9ad87..4309ab901ee6546516b7a930a793f694681f162d 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:357c47b25132e2a0f93451c551f771b526186b5b7d7ae1f4b5860595e3ed654b -size 1426616 +oid sha256:f1f49d4caf616c295edabb414bb17983022d3e745da795abd3a91ed4660089cc +size 1476606 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png index ed4c0e46bb3d2827359e8b5bbfdf29e488b9c011..92f65a5e305943cc41e10237ab88409aa7968099 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90e9115323f7c9c55b97f3be3ea863b7c2a2dff3c719e4b3885e8409c4122109 -size 508051 +oid sha256:e9f769133f9266eb2328c573ca8cf330c71556e1f74e3aae05207eaeb9e64dfa +size 542100 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png index cec26a9f4c8f930106b1f133a9e6ecb18bd131e2..4c839d74e37b3a81e90c0000d8a4919171c0037a 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a42973298ef6a7384c390143adcd1cf80f0282abfbe324ee953e24fdd0ab4642 -size 421075 +oid sha256:ea6da11cbdc3c6c983034f0e547f4b3cd877f3a7565cf271824ce5e83c640314 +size 447911 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png index 48d2f578807bf82b872e1cb71278af223aa6cc24..f55007053b8f3ba36ddecd464ead7843bf0f20f0 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e13a4d3022a149824b6e0246723583bef2797ab189df723f12542ba6cf129abc -size 1476826 +oid sha256:8e401d5d7af30a14ea4eafee7cad879f8bb177498b9131010fdc576cb7df5990 +size 1521215 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png index 34086afedd74c29a711d5b58d8590456151c3b09..4fa7028c75befb6032ae1f29e128be539cf34d80 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:336b50e5e5bb2fe140a176a422d7724366792ce00219e87975961a64ff0f28e3 -size 1476765 +oid sha256:fcfa6fdabd75c4e38e1aecdcc86605e965b828e8b7892cbd592203ec5dbf09a2 +size 1406750 diff --git 
a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png index b6218edf77b228b3da43a67c687f6310796e1ef8..6c44c55a6de690b71f12dab21206eb10873d7b23 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e6f3aba74057941dbe1de8417bb75996d4e66cb797f3b73c4980b27dd71617f -size 1426051 +oid sha256:a128009c0fd90027b2dd63fe9dc62fbf3ffd06e233096017053d511bad876838 +size 1511478 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png index 48d2f578807bf82b872e1cb71278af223aa6cc24..d29c2c3bf69b6648ed25c56e16631246701ebf86 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e13a4d3022a149824b6e0246723583bef2797ab189df723f12542ba6cf129abc -size 1476826 +oid sha256:5a94812facdc5df975d6caba10580b5f8ef9716993a753f769d600a35d1e9261 +size 1473675 diff --git a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png index e1c07e564b0a31dd52e78cc2b8c544f87874710a..349eec997d3f36f1b483742a407eaeac784d64ce 100644 --- a/images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png +++ b/images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f41fd96362f53903c74f47d6e7aef8e03359725fbcedb10ce2c7a218b6ec650d -size 1420712 +oid sha256:f5baf1c8096e4c5ebc9f4dfff5739e4c46e0ad4abd1d302646b7cf8810573418 +size 1465966 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png index 9d7281dc86349b4f540b3ff56cc07e1382cdcf6e..30f3d7f39adb6863927b9f1a735b94262f922ce6 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:694c1c0966abc0c4a948bd4d627a24a541bfda5c2146bf6fbeeece5eb5f0e442 -size 1163495 +oid sha256:e7e7f0ed64b4ad9650ea02ce3a67c63add5d221ee04d31011c56dca5d7beae0b +size 1464533 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png index 85102107830d546848f75e6d8db57e451abe7286..b91ff888c57b1dbb8e601799f4a4e234d8e4ac6d 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af865e78772f6d4b115eb57b5018ee72b9c3263dfb00426cffdbe8e50b070634 -size 2025107 +oid sha256:6d62ddc0e6b00d8804e5eb9b58931e889016febbc58dec01bdd1ac067f055cb0 +size 2248006 diff --git 
a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png index 5e8c0974a468a7793471dd59a8678a0646acb903..70a8c7d1cff107684c28816d9c8b18148943dbe6 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:827e52bfd3730d48252c80485c5945b4158409a4acc43bedd59631a6edc8e5ac -size 1174307 +oid sha256:9291b53c2ac6e1468b76e8c43e7a67d42d034d4fdbdbb0bc86a1794683958258 +size 1137773 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png index 4fc55e5be967a266407d24a313009e116ad050ba..31686c3fd2782a93758f658cc3e65e19c94de387 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e54ff9baa63f7400c1b4b270d4f1017bb70e38271bf9a48c1cea59b94800e02 -size 1160571 +oid sha256:244e937cfd8b24efb259a82b7dfc8ed839ec6e36911bd599ebb8e8cc4face3e9 +size 1179991 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png index f0d4e61356145241f049782abb7fcd6349e73a09..c3ff037ea700099edbac64ffa8371e0eda04ca29 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f62a01201b0faafd95c902ed8e84e4203e667728e27da66963c9fc4223fd1298 -size 1123949 +oid sha256:2f1c3457ff4d401b47ebda05b3e46769fd7d4e7b1db9dffdb334bc97f0413473 +size 1598382 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png index a100ac04ba932e0ec94175bbfc463fa71ba1ecbe..d88e089a61ba64225dcc5961b2b35a84df02668e 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76977278480ecc8681d67bd457c896d3529045d1483f3e3c24edb3a628492f01 -size 274209 +oid sha256:b984a6c06e20c816e309bd283fc68ccb636889eab4fc4da93fd978f02d0f7ab9 +size 218072 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png index 30e124b2de4e719a54100ccacaacdfc35fa48097..6111809112c95ddf40113701f9ddf5f7f91a65ff 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3342b56a03ea291ebe6513b26b32dad9ea5d4af8bbdd9c071359d92ef3ef238 -size 1125606 +oid sha256:973f1c70ff722ea5f2bbc1bd99e59e6db69b16ac0ce572ef55f254379e6e37c6 +size 1477630 diff --git 
a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png index 96ead02428e3e12f72eca654e6fe58b45bd02744..c5dbd24b9ae5f6c580c549ce16375d6c7e67f8f8 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96c0d256b53911c495d740514dc5b69bbb85f9bc2ddfe7e778fc4b5185588c1c -size 1229594 +oid sha256:f4829907ad5779986032d39aff79028c312f78291aa10956bf80e3382d062e0e +size 1226075 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png index cf76378819ff6dde6153262f6efd9729bb587424..05095d64f204f1f7e88d504581eea68b0d823e08 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:efa0d287daea87315f39cb5abd849f170be69559c0bdad009b229c2e3b2d7755 -size 1159617 +oid sha256:36e40fcf5e0a5fd3a1ba5ffe6109a6e3d6358768a24a0a2c7eb63ccc7a4c1a23 +size 1151562 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png index b2e3657ae8d3c2747ec5ab729b7b2312ec433add..61d5950bcdb8df96d6dde4873ce8b0264f201231 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:402abf16f76d824d0ae3523887d8968c5d5fa449448a74496705304ab1eb84ab -size 1156077 +oid sha256:b40d5c828a74616df6943d033e1e011f03f5c80643a1f3a4c36ffa7fe29db39a +size 1030904 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png index 48d02ed8f37d7ee317413d8225f20d6bab785c58..525f8df6395cf93a7730f6aa640a92cf8b334a85 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:793285f4ec11cae33f21b8b67eaf79758c096dcde7f565561468fcc20831b51c -size 2141971 +oid sha256:916d0fb2fc9cfffa0806a1af1712823a8385477555f4c1022aace6e6e62949ab +size 594318 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png index e1b6f1014e5cabc317775f3dc29ccdd6bc08ea13..2d43ba1279f6dc0dfe38397aab7b604c23cf07db 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc12c50f1ae7edfc751391110bec12099b498e1567ddb6aa580580da82024969 -size 2717384 +oid sha256:c7b96568e4f6ab945267212079ce31f48227566e9a281fb16fb4824944e0accc +size 2058116 diff --git 
a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png index e08ed9240da575a859e573470c541888248d6cbd..197729d93ce0228aad001847cf08c540d4ca1bf3 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:663bafa84e428948569858b777f19601097e83f7f3a572457763bcc02c3c1a46 -size 1160001 +oid sha256:e445036934074cf3d46eec861839f9d69dcdbb09745d9c6913b292df83df05e4 +size 1294130 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png index e5787fb8a1d5347a14301eabaa2f9fd96d11662b..875e4a0e4696fc2339f13b05a1b98747fc470276 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07e44de889076eb15c892054496ec3add3d526d9e30824c3100f327c86565f83 -size 1158378 +oid sha256:aa51ae3b40360324339d26a39daaa358c6f8cca32197e598c55c0f17cbb5187c +size 913844 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png index 5228e8add63978893bfcc864757f373dfe9e2fbe..78116b4bb17ffcb02b5a0df17749ff0da8788591 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e51844330f787c21057d8289b29e0e2eaca064fdef0ede72f5597f8e7695e488 -size 1137715 +oid sha256:b87e05b374959721f506987ff97b2ec093d4bb631bcf26b4b856c7cc67b83a4e +size 1604423 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png index ebb340758db022f2042f00efe042664731dd0e56..271dbb99665a8f49378321bf8b83fcccf22f3992 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:464fff7f5219f5947055ef907ec4f04cae03fcd34c571b5ff8f855beff0635c7 -size 237994 +oid sha256:cb17042036646bb4fcfbc383ecb48268a1fc3990458b0fdd43a5d70db26ddf53 +size 242312 diff --git a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png index e81f09ffa5835182a5ed7ba7ff5a9803d95f6d86..2175b2d4d53c0b5075423aad485625dd353c1d24 100644 --- a/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png +++ b/images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1011af9c8485cc728240425e2ff070edf2399be18672e26618cd4bbb09b6c459 -size 1192052 +oid sha256:bb453003d0aee09175a08f215ef3defbacc815c8dc2eba9a8fdf11e847532c06 +size 1498192 diff --git 
a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png index ebecff87a62f54900ac69dd45c957fb1aca1153f..3ecd73bd432bf0cdacf9f99204b0c179ff00c777 100644 --- a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png +++ b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4cdf182fc0f45d3218e5fd7d344d00b68b17a136034411e527a4826068039637 -size 1866763 +oid sha256:2a34878daee75b7e4becb850f7e6f5ddbe90e7eae45e8c171030459a2d6019dc +size 1142584 diff --git a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png index 5e2e7d73bd5c40a76d4ff7f7d813efac19456c3d..ea841ba8d01d94e88fd02dc5b373d56451a018cf 100644 --- a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png +++ b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3bc567ba7c3f753d7f41bb9e3ce78419a325b248572f2a8bb4435afca4ce8bba -size 1137874 +oid sha256:ad0769a3d1bf2aac2f9b475585ef7ba8ba3ed80a67733e3692db08b1a48ba37d +size 1463505 diff --git a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png index 92a96fa860a46bb4923c9b581e51d866a8a663b2..b3299034399bd7c482976f10a097a00f0dde4d97 100644 --- a/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png +++ b/images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4db75f6507b595a14148d25f29c4ade1e499027fd20b4fee2a9244141b47bbe -size 1120898 +oid sha256:8581ea2386c454ec51352a5e8bf21c809642b4587e6c764615c42864aa9f753c +size 1143402 diff --git a/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png b/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png index 97d577bc38262b465fb9169e358cb56b621e1302..f8ae59d194c9fac489cb430e10d5f0b4223e3653 100644 --- a/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png +++ b/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeda507afff261cfcb7febb2db54cf9c9a2ab2984db36b8721381fe364fbc487 -size 2025662 +oid sha256:31524fc2db89fc2af5a4d8e6c38a3374e6317ac93079e2ee4b282fb56e785850 +size 1991254 diff --git a/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png b/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png index e401616385f86f1f2945e7e5d75b347b52f48c71..2492396a10102050d270a5bbbe4a07c63598c4e3 100644 --- a/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png +++ b/images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f6c563285bbdc5a6f519bb3dc6fa69cae7f1cfc24a3f761bf292b2c732ed9ad -size 3594379 +oid sha256:93fa5dbac1ad639d1d62fc3d5c3b8cbe3d93fc748226f501f56ffcbdb8e20199 +size 1840173 diff --git 
a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png index 8ff4dfae83e12df3e7da6905779c3d85a0699c7f..558e6d3a381694ed18d8107484a23549962a73a3 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1213eb941594e5f78be26197c9614da09b18896dba88274a970e0f5a18b3d6e1 -size 1377788 +oid sha256:31201fe82de5c8eec9a9f1c864be1f65843b135a485f7a62382f4bf66bf54413 +size 1085547 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png index 4c69a11a8de0b564e53d7a843423a749aea3166b..999af81cd7f9ac2ab628a767805f191746872e71 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:613ecbc77c76ea81dabf1c76dc1725071cf744607facd773137a72268b27675c -size 1839336 +oid sha256:b204f36b7775069a15167a7bbe715559bba472f7f0cac08f2d95f1871e8ed8be +size 1898351 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png index 87228e7701d7f05589d8d5aeb2fca8a215aa8edb..30aa12fd3e72323913e66fd1324c581a4d66b347 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ff50dc1cdfe39ea25d89f1427ebe6bb5de83f52ff16363e5b9b9b73ca5c16f5 -size 1430356 +oid sha256:ac75c81c15c45f68b0d61633f4fcccfcad4431dc76a6956dd713e9f74f46176f +size 1491159 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png index 7459dd8399f05ae20e5a92e3bb6e2b7041ac741a..48cca14cd11b2adb4723435494c8cf9307e61420 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0dcc84f530ab87700b42b650ca8171748b28b6a3125904bd0418d597ecfcbf43 -size 1836679 +oid sha256:fa6026dfa480ff54c36f98a6b9b1224e657f6cb0b484e360418516d2994ed8d8 +size 1600440 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png index be92357b84327b0beb733edfd2c940bbea1171fe..ffb3d2c52de671946d2aa97c374656a7cd9811ea 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:916e754c9ca30e636532ce44963b7d8848bde99a117e54ba9cb772b42fc4b02d -size 2211519 +oid sha256:64bda1e91fa3b9d6451cb9fad93376467043d7dd522c952262d380f871df59a9 +size 1872196 diff --git 
a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png index 5be7c36b18e7b154be9e4601848e79bf9410835c..5dedaf3c00fc4ef457a7c55c8d5e372e36778cc0 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e4a9fe85b6884ff9683e8439919ff707eede3b29d02934d87f684ccdb74e3f1 -size 1844761 +oid sha256:ddfe44aeabfb1a9366d59708bcad81fd962a36799826bbb52cd8aaede8f72f87 +size 2227168 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png index 3ca53e6a515678a0701b2d98f0616465a1076b13..b0d89e0f4c47af6341b3d76a26339d30e86d0651 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9c77f2ba339f0a25cb7b2515c85eec4bd0f772cbeea3f982cfbe9a11e06d1e3 -size 1843961 +oid sha256:1ee9ce2baa7fb0c55171741e6ff253cc17b7a538a4f22165af4477395096acef +size 1578140 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png index 8cba82500d033ce5a159a0ca7e3ff29a02b024e1..ba2c8ac21601d18c3d6a6012403b794026752814 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c34478bb6953161a126b6eae3028bb42bd0ef7cf288c6ec0c2d38be9977cc34b -size 4163115 +oid sha256:419c5dc45219b5d1198728fa9d9e8bb191735e8d994033b616ddc842ec33b2bc +size 1853695 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png index 42463171422f3cbf73b105517fb6420249063358..bbbbd0185db380ed1fe15f827c8cfe6150a588f2 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7d28fe94f3a9e9d7664f7ad0f5f2553faa7cd9970b222957d7da07e0e19446f -size 1842708 +oid sha256:74f4240b65d62a439bc8e5572d4910097614afd6aa4d14ccbb094bed68e38c6b +size 1239835 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png index 91170e83a367379e191bf40761fbe0d0f892d5dc..6db227cb32319dd60fd36d4f0a2061a0cf769d4a 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4efcf1624a0b5828b580cfcfa79ba2f70ca0f8074a3eb6d74ac2175607f3784 -size 1930404 +oid sha256:2e7d486220d288b7cdbdf067e9cab53f9ff4ea8d47e8fc273fb3733e5f4da6f4 +size 1577410 diff --git 
a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png index 589f15f87c3c8473e2eb11f466c67964041b8a23..f40f1f3f49ea85ec3aa45fa31ed95a7effbdcef6 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f94c23b9175cc8cff6892ea6e74ccae0b72d5768c79fccfe4edc6f32fd171948 -size 1838029 +oid sha256:e6e54e4299d300800923c41dc4a61753d55e98846d873d916a603f22c683ad84 +size 1679489 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png index 14b1187b3b10ef1bae7167c27bbfa8653d92352c..7625a3e7fae63f9499bf1972fe3aa1902ed31848 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e684920e57740c48a8a4c466c5d4f47b6535ab868b49e02b5c5beafd5c6e29e8 -size 1225599 +oid sha256:89b8654cbeafad8ba72f8dabaa6d4ba9ff6f3e1816205d48e5ea73c13b47219b +size 590401 diff --git a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png index d43e9e71d008df65adc8ee09893b2288fbb5436c..f85025e9873d65757fcc06658d7401481186411d 100644 --- a/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png +++ b/images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bcc4279cce6250b542d0dae2f1e539baf700211ab31316df160db415f20943d -size 916688 +oid sha256:0e87a6f8aade7a242a8a502e3ae927d4bf6d5861ad6ffd8366f0aa913a02b064 +size 869838 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png index 9bdc5cea5b83c5c2b01c9d129a455c4b7afdd5bf..bcb5c50ac5501b8d3e014bf38f90b96acdadaf70 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8a423c39a20408979744ce415f2a8f75e69f5ba34cd89e35653207a45f03b56 -size 1491451 +oid sha256:1e1b19b5088b7c8897ad2c637ba3f47e633f46010e07b21e153f1d710c5d83db +size 1576285 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png index 97c81e98df303aaf21aed4453ac333e69a1c3825..faebb183ced916fe1cf870378dc20d9c2d5081b8 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f75dfe314d63297fbcb8eb47eb2fe7699d3160e2adbcc5074697eb1fa48a1e4 -size 1194921 +oid sha256:9453d714b763835395ef213db102cc4210f999335c90214b055fbb129f44dd18 +size 1164320 diff --git 
a/images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png index 33896be05996eba7856c8a105bc74c82b4f095e7..18c0228a3caf412f044f5394020d55c76b61815e 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39655e6a99a0ffd7b0ac9d84ea645901cf039ec87434e7d56c4917413373de2e -size 1441479 +oid sha256:2827095bfaa440124ebc530b11cef65c864c8ab1a18c384d87ba825ebef176da +size 992703 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png index 06576ef9452f87238eb1ba46134bbe82d56465ec..27e65f696b83cb93fd30be63aefc4a3160386415 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35884e98515f7e1af895dbc8bfe8e2e629def1b5c6e73f5984d91b35c82bcf64 -size 1154912 +oid sha256:064935e90372104cae24e51be2c9c28a0ef9a2062b328a1fc7cb07bcef5d7b9b +size 1518124 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png index 4c9a161b2319c3c3bc68f047a11d35d59145964d..0d8f80ada733eb0ee994f51238b6da76f6491eb6 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:037d57300ece55f4bd6d924800dab964e7f94294c4249e9ec3b076381df3bd52 -size 1180707 +oid sha256:e96e74a12a19faa932daaba9765d0e1277e0f45d442d4b281a843cb22ab1c3c5 +size 1590199 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png index 6cb8ab03cbde78dc39d22566543e3b35cf28a259..d25a14b02c22f43696f18f4c5f3619752e9bf08f 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58a0bce5d7a9a00b231263942e22a1e655eb8062fc75251cccef57378cc65c74 -size 1413429 +oid sha256:4f898f6fb34be62ada6cafd82993c4f48f1c578a94a81c6b77cd61d4b9bf2ce7 +size 1565354 diff --git a/images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png index 3d35b63310955db6210aec160bda150659ced632..3086d8a48f0135de88ebdeb83f5d42eda695622e 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11d73e161245e625af32b48cd1db002c71ab8ea94bb5af3bef75e138aa898253 -size 1368944 +oid sha256:fc47b872ada391201c3d9040c3ba6b5ef1d9436e9da297eca298d559e8f3651b +size 1565423 diff --git 
a/images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png b/images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png index fdd7a21949da5e82caefba3981c3047bf98b4527..71f45d244e9495f2e4318ca3fe844722ba4329e2 100644 --- a/images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png +++ b/images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02d8b45f0dcbf308493169b84654066c9c0ff0ae560dea503390e82f316db9b2 -size 1396630 +oid sha256:0020e5b129adb4bb5c796c206c431b177b44fbcf1a4f18e0f7896899ea2852bd +size 1501812 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png index 6b935387bee6b04458588ec4f0fd1440f245959c..8da1cc132e2468e276fc1d1409248be3106e530a 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bae101ddbca4d8213aa8b94b625cd983f93cacd86545cf639e4c42ae4a3e1d57 -size 995691 +oid sha256:1e3c1c29237091e097910ad5fc47cbfd7ea75c853cd51e5d9089b89181f89b1a +size 934282 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png index 686be323df54969ba76fe4ff7e83693e0df021e0..f3736957ac46d1a9ef2a2ad0a9e94cda2fc91ee7 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f9d99da5b1e20b885a4f4bd7223aa45fee29172705815e8ed38fe87366d6e60 -size 811981 +oid sha256:fc85bb46d1c3183b552b97644132d4764566353e59915993c029fd415809c1f4 +size 1071538 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png index f7496f6b00ad02bbcf81cee790dbe6379934b43f..a2feb8390d4661a5888e6a300e94ac263576c7c0 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36a7226030cfd0239a2c6de7d2d3e5f602c6bac33510a9cece000d6a88bd340c -size 720296 +oid sha256:5421b93c3c119cbec3246c48ccb9256f4f7e3e770ed1a86b3a9febdbaaed2a8b +size 788635 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png index bac07dff656433339ffd0ddea0b7ada372d30ad5..6fe256bb69cabc23d4288ecb2fbcc6c965b8483d 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e9cbc1d09eaf1196d47651020e5c7c3d7f0cf2b3757845ab7589c603f402d4b -size 554017 +oid sha256:ddfaec5e00dabea89fe531e94c027e3924fc8de47a296586f8f5614ba02e3312 +size 389463 diff --git 
a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png index 435ca3282f4e375e3065cd3450da7e52f981e7c7..93cb4f4d3cf5bef214332ec3edb458c5a420c85b 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e90f3f8b9b3ae7070872d7c29c5486c97c612b1822b9f19d2c685fee40207ae6 -size 916016 +oid sha256:1293e978c19940c1e073d83acd10bb6ef90e5945159fc67a0f9bf6acc53ed996 +size 1389815 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png index 243c8310cec5ed98b5ce23b2d271e61ba9de2515..8a3b4e1c1fb408bcfbda2d92f4f401fd6000a053 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87bbc7eb9b985c67b16a45f912e4a57d8c56ad6d7d5bd5be3f736dd852055a5b -size 1167499 +oid sha256:4bad4e4335de244b114a7308927d31149ced72df6992db1182dea646594ea6d7 +size 1080103 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png index 7a9309737b34624353faba0bcc1ac157b9f377bd..e185cf5d3f59090a2a4e27123dc1713fbb68a8a3 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06c066b22fa81473e68968c79a7a00022c172e722b955cadddab1a9fda770613 -size 564554 +oid sha256:0a273841235301bb70db99c390e6004a8cb85436552e9b4788a26d9472fb3291 +size 643084 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png index 0479d85884b072426e1a433333b20e8d15580943..91363ba7b543d41b417de43eebd633607056d8a0 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3954773e6230150fc15354312813c5816fe2fd0db5ee24e37340c300b8fb513 -size 832926 +oid sha256:6fa66b415e2e5a0a5e257a523afd05a5c8421b461d4c4a7a2a9b1e666995dbda +size 1064470 diff --git a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png index ba6db9e27f545219c06987d9daa89b38c03e239c..b4cf29ded1d069167505946954ba6e1ab8204b4c 100644 --- a/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png +++ b/images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a19ad8712d45fef62c04f8bd8c6d2d84253e9b81e5dfc967a3c093a7a6fbda9 -size 694323 +oid sha256:694d62c0427b860cefd085b32539afe34f03755a809dc095ad9f2aa706d6d78f +size 653697 diff --git 
a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png index 356fabd92a69d7728ec6e2337e0d6961218588bb..c8389f262c322331f6e36fed2a727156a3b368a2 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a1e45954f16ed0aaf69544e1cfb4c6372f50452c471845f79684a7f35645b4d -size 897470 +oid sha256:880579f65f77e32ba178f20e831509292b199ccce401325df1d1d31c5ededa88 +size 898358 diff --git a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png index b1b3fb88ee66525c9f5d51e18ca0513ff4ca45dd..e18a90ef1669f8d2fc59505f0d7d23d888b238a9 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d9a5d890fc645e7a06c22d97e1be336305011df3da07dda5cff9888800a103d -size 2101929 +oid sha256:4bf8203e6fb077265090faded1946907819b3dbabbb9c94f02ddfb5d066dfe78 +size 1904915 diff --git a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png index a68c85549a3e2a6cf393d41ad5b18fcc7792187f..a6e5a27ebe2ef13594c411fcb00ce59cbd20bb4e 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4be8b77559c1e4367e344ab49b1216ade47104dc7c58364a4c171014889e7cf5 -size 1281008 +oid sha256:39222838b8c1d2c462e971901ee7da178aaed6ac76ac8a1b39928e6751424111 +size 1298042 diff --git a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png index c8f60d8a8fb65bda64a3ed703e498dfa42d790a4..a64f059a522579d1064b66c08356a3e3ad55e50a 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b64ee4d1344c1ad16ecd7ec480776fb617d6520e12f50ce9d9a0c6709dd70e1 -size 887651 +oid sha256:23748ce160f0c0083adac4a79a5cd26b0c72c329bc7e7ebef821e072575f95b9 +size 824922 diff --git a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png index c8f60d8a8fb65bda64a3ed703e498dfa42d790a4..cfe47a1e6e53cbfef82446051ed8c4af2d05672f 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b64ee4d1344c1ad16ecd7ec480776fb617d6520e12f50ce9d9a0c6709dd70e1 -size 887651 +oid sha256:b2d4b3466167ea090c0de80dba6aef981082c5be535d88e4d18229bacf0fb3df +size 571277 diff --git 
a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png index d9068cfe73c4dcc4d5ead58150f503cc78823671..d4dcdaf789e205ad8ad03ad5683e468c7cbbf46e 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1dda395d642d62a3aac7b99e70bde972a26b8feb5eb304f06da759fdad1c1eec -size 1272377 +oid sha256:f46d5b887da064b0a3834ce3acef891896b60bf5e81c7026e20bddda996440eb +size 1023498 diff --git a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png index b1b3fb88ee66525c9f5d51e18ca0513ff4ca45dd..23f31435b1e0508c2bdcc40b7e662420d1644a9a 100644 --- a/images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png +++ b/images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d9a5d890fc645e7a06c22d97e1be336305011df3da07dda5cff9888800a103d -size 2101929 +oid sha256:bc7ea0d40e168fc71c9f998c45c61cd1e8d7ad41e6b60af8888a080872f0e666 +size 1831055 diff --git a/images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png b/images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png index 4dbcb40619ceaf189f6b114aef05c6dc90084241..b22efa6f9f52dd7930191f644611d028f8fba221 100644 --- a/images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png +++ b/images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:041c480d4652b6f993212f47a2bf00a72965c9a27858e3394260fc75a1757940 -size 844553 +oid sha256:572b15df55b93d90062d6d3a835c2e2e62504830d6e0374adc14c84f383ccc13 +size 957804 diff --git a/images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png b/images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png index f6015d7e5ac80029a1da5325b4d29ab3af4579b6..2d21d82772b95981f3d1e3c007ca4a49055d51d1 100644 --- a/images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png +++ b/images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a84a1e560e134ce86700cfc76e97fe81b1f5df82a3fc4ebcde7b5400b0dc5a9 -size 944117 +oid sha256:6ca2163068fbe09dbdf618f3d424c8dfc22d79623754e0fd3e5de4482c4e819f +size 298654 diff --git a/images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png b/images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png index a845520b2b4773a13a9d57953f5ed3b20814f1f0..40bda3d45d44f301f74d11c2a444c625a93c8220 100644 --- a/images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png +++ b/images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b4f47b944ba0784b0c712e7bb648f07e47153b430d9f640e2d845c984fcb0c2 -size 247903 +oid sha256:5d70546f2e26fd6c9c451507918bb7c643eebd09ced3e6ef36ba517941137057 +size 247451 diff --git 
a/images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png b/images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png index 6e0fe7dc7e2b4b73b19c380ca7a54ea5d7eb352c..e68278ada4f6ccbc137de3598f826c0fffa0c34b 100644 --- a/images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png +++ b/images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dce98b366967e02e0ec9f9203372492eea696d933bef4ea415c5243c01bed214 -size 2783933 +oid sha256:05c05841c4b4a7562a949e4cd921862f1f631d153729053de1e5a7adb04d6f6c +size 2057568 diff --git a/images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png b/images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png index 5474697ca93ca3eaeb73ce77743f7e3513c0574a..7590b691e581228896ce7f138d60635ced753344 100644 --- a/images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png +++ b/images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:315bbb5bf21c2d1b6641ecf35419e201e5a80d3e4d0d231cc34aafcc5a933552 -size 3092695 +oid sha256:bb4bf1e1d75e62419fe6aa717e4a9091cb2c7e521105e5c665d70ccc58923e87 +size 2291779 diff --git a/images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png b/images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png index 0765080f00f7d73db4a8ba008e66c04eb07108bf..b4ac24ee3539dfe20d2df56ac960d1b695acb7b8 100644 --- a/images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png +++ b/images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db2e55fa48ce583348cad4a7b777cfc83da31cf061caf4633063fbda6dd852c4 -size 2781440 +oid sha256:db8ed517c38705a2f94a07da3029fcddc52bf13ae59c7ab7e1e6d9c0694337d0 +size 1553316 diff --git a/images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png b/images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png index 87e63e9c732095310217a98a1d08a38219fefc44..1eca1f484f4dd65dfa98c9dd352ce52d5fa54010 100644 --- a/images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png +++ b/images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ee1cb16dee57255bda426c218003d8931e806662bc70696ea0cb99e3183cf93 -size 1126649 +oid sha256:593e2893aef10eb81e296a592da1080160cf1ad503a165eb4e0ece3f4d45e375 +size 1247642 diff --git a/images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png b/images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png index 3b69b5284e683379496b3a94b19e56dd0576a580..77729aa4c9481a8544b4cad86fd091918561e796 100644 --- a/images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png +++ b/images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:811a1eac89b64916fa6deb8ff2d70e89eee39611b2a918588a29f23377a5292e -size 1053594 +oid sha256:fa3b7c4ec677954bfc9e21707642af85f9cd2edc69413420a4ef1cd12320b13e +size 1072572 diff --git 
a/images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png b/images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png index 87e63e9c732095310217a98a1d08a38219fefc44..ab4e675a1dd88b3b9d4c885d43c1cc256ec8a6ee 100644 --- a/images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png +++ b/images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ee1cb16dee57255bda426c218003d8931e806662bc70696ea0cb99e3183cf93 -size 1126649 +oid sha256:987a0354a0705fbdcefd964fa5b6a23021e2dbd267f1f60727f0f4d404ea231f +size 1730420 diff --git a/images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png b/images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png index 997128677ac3c071d3fcb552af275b1365965b1e..77864642a2251553dbf356dfd0221161572cdcb2 100644 --- a/images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png +++ b/images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce7630aad62eba93dcad77031587fc27d0ed67ad2434408f4509c79f11696a40 -size 1072554 +oid sha256:abd4d7403919ade84f05550bebab18d09ed09267a996447b257260378ccbe67e +size 603256 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png index 421550c9cf5b7aee260298822aee527d25f3036b..4e49fccf7cf85ea6c6b4a47445f022c79ed5cf28 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a63bdf6e3aaf6fa70fc1f0e8a513a81779f6ceeaac1562324bd61078cb3f5f09 -size 457665 +oid sha256:964698f6bdfb2d543a65385bebdc56efa73de9a9c108df7eb2a89349cf62e38d +size 529535 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png index ce88c520a53cb0c7cbb60210280fe0748c43a24e..c1346877d3783852f785c5a447cc38cc3d3f39b9 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b0389adc63f635293872025ef6ece44df884725b54938003fba91a3d013f3fa -size 461575 +oid sha256:095e70f84d32c311b6742b888a53b2b6fe5f2b148b11585364a1471431753dca +size 486891 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png index 7c5f9a86412b48313aafaed66e082ba2bc105712..48f8106e6f25c91a12a1f570f9ca9e0105ab53d6 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e42a5e5d76c6581941b077afe62818e51037ea103881aa31a9ee3f549f56d15e -size 457643 +oid sha256:e9dcb88ea5661a47fc516c33bc83d78b52c37e46ad586d2a2f9f37d8af893549 +size 415957 diff --git 
a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png index 4c8649225a91cd45c82927a620d61976429d0aef..92bce0d00d90c1440a7f55690786a4d3dc1689ec 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b989a79329b703eebbbf72030e0416ac04e5dbf1d207be1284726e18afe4c795 -size 524602 +oid sha256:636025c9d43aff5769de0f817c6ab6520293ca73ca9f18ad21a54b0870595c69 +size 513221 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png index 47f1faeb4d71d1dda6c747b0a0ad8c4dd7b4476b..e2543153abbb596d199fc9ead8559c19b8fdfd54 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcb9f055a4a9301f6cbc71f437216c130871b42b06f98fa11ed42ef80a371aff -size 458519 +oid sha256:0f968d44bc26091a9435ff474e249e1bf5a3e2a6e3ce40494fd8f6948265e4cf +size 644629 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png index 085b65e15907b9ff956388566867ee1b5bc57c80..b86856e7bee12322914a770f2fa76c0be012f20a 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:299a5899a427eb7258c4eea435191d1ea58a784d2efb769bbf429948660bc4d7 -size 457354 +oid sha256:0ca3258430fa343cd4ab1ce32fe43ea63ae343006ce5778f39f7f99699223545 +size 625969 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png index d1cd2d4a485bf59d7ac197b779fa28a8497b2fea..5959501647c58f082526b5e934d0e0e0f4420481 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b0ef3bec88a8c55ca4b974ef9853342936dd95a54445a22cfcaca2ed767813e -size 1849968 +oid sha256:6d13a41a08f9e9f3184c1fdfd368b2cd7f351327f32ccaad0dc7f47e21993f97 +size 1235598 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png index c1ee87a2eeac765f25d2fa998e17d64bbde5e449..b2a73ad65bb45cc12b6c2db4a84480fd3dc43943 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b9316e1cbab0406591e109f2a7545e254e7c40065bc77b15ee05f91aa251d90 -size 565738 +oid sha256:eff519d99d891ee9125edf7f5bee8592f43d3793c94b9fbac4515f81437ddf4a +size 480107 diff --git 
a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png index f6c288441bc43b031239011b5d56eb2742070c27..c759cbe6d7b0d8645ec4782e3cc574db2f57419d 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c98bce963c4d8ee2a3ad57362f833794c69302724fa37536a003d49c3606cec5 -size 471794 +oid sha256:f336fa4055668d529659a4001bf8aa5117908e61aa214fe67c9767c4588c79b0 +size 606275 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png index cac8679f02e37ac4512b6af670a44cd809cd53ec..fd7383fab0efcff2a7c4a98740d99671b9524513 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80b1d47a76cf5543a944bf0c441c0b4291479b1309f5286cd36d164b5c0d04c8 -size 481029 +oid sha256:01cb157bf9c36a747ee4b4055537d2ee83d5a4a6adc106b9ce299c7d1c1cd329 +size 724037 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png index 6dba91ed4ad27fd7ebd297c059da9e913e3b02c8..d3dcf918794778ef25ea22e3df5e7b0e97aea005 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ea4b6646ef60cf6a59a97b7abbc03be1f14278cb13260e416aad0aec9c04b9c -size 457705 +oid sha256:e9a2ca0124c235850babae753073e535d6ff348753c2d58e8488d376a9b1abf8 +size 552524 diff --git a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png index c1ee87a2eeac765f25d2fa998e17d64bbde5e449..ef28d8ed547071f11291bbdf97677226891f6ab4 100644 --- a/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png +++ b/images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b9316e1cbab0406591e109f2a7545e254e7c40065bc77b15ee05f91aa251d90 -size 565738 +oid sha256:665cd9a9c8704c2d7cae8ea314ceb52f1adc3f981eade765735c5116e60b5f61 +size 261065 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png index 581f41925e7852c4e507129333f20a238ccdac4d..9bbd992f782ecab3de9e310e4e000fe0c1306427 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:033cf8f77e60afba9a0187e349120f8f9853e43872bf082fe7ccf14b81bc8598 -size 1744215 +oid sha256:aab89c3c6cb8f367690959628f1990d82ea64a64187a4c39fe28ded7f5c66e64 +size 1466875 diff --git 
a/images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png index 1111ae0cab4b857db53035e2233e89214af70940..c105ddc060ec2ae8af2e84ce49ad7cef28a47731 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:71a9d3faafd8933263d3a9206c1556604790b140b645ea279b1e40c0f388248f +size 2922173 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png index 1111ae0cab4b857db53035e2233e89214af70940..697746ec2a9ea64cdbd2d3f9060dc6eaaac7acaa 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:45622058634a3332b396951a268c8acc4778720ba87bea97476b3e37a24bbbdb +size 1614155 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png index ce11a32aa60afd5e26193fb45e420e1a40823492..6bd5b7387391007758deb3228c80a18bef0d6f33 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:863517ed41fa89caf20fef1685d8c058d040ffa6f0bfe9a57d9353079749be41 -size 840828 +oid sha256:6945673995807b84a9ea7ed8fc2203e307598ead377fda0aaaf36a83e3626a96 +size 838987 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png index 1111ae0cab4b857db53035e2233e89214af70940..ca01b0b597666b34f4b226c82fe8c42fdd152872 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:6841689031468866d92446f9111f2dea7dfe11fda46531631b96b6f7e4212462 +size 1617192 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png index c688b753f48e8aa3955eea23df3f5312312b1a5b..6159be1cac9eeae3c359ac59e50343f9008be5cf 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a58cd61099757cc8488322df2049710b388cd42c1e51b34124374d55c505baad -size 887913 +oid sha256:d7456440d04e9ed0dcef192c3427fdf7000ebd3a51fcc2e67ebe201bd5fb8f1f +size 924499 diff --git 
a/images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png index 1111ae0cab4b857db53035e2233e89214af70940..d85a0959f296d07dcac34d6b73445040b1968a9e 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:fea46005a217976edcd141f7a18b71f8943f7ceb57dabf89573ca2b22e5dde37 +size 2946664 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png index 1111ae0cab4b857db53035e2233e89214af70940..9fdaf06ae2f336d68707e41f751283125b621de6 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:c785785bbb93f47a4cec47a63cc5e9e91be94edcb19872c7df5fd8c04f144dfa +size 2770681 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png index 70437e247962a3efdd81b970490d5dbede813ae1..961f29933cfa837aed360ec9eb23458e3964870a 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b42b3a924fadc82642d5bb271b3097924cbae1f35e26e6483bd996ec15192b0a -size 732401 +oid sha256:95624a0afb3b18d66097541d1e805bcaa3327e09d58ff9c3cec70c6b9e22aff9 +size 1007122 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png index 46ee6a238b215893d536a199cf85d74daa0062a8..1e0f7c85339c4a85d4c08f80b49ac090f67e784c 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b345f502492b89277f0c02dcc5f34221a72ef5b96094b80058890cf4ab89883d -size 725963 +oid sha256:12b52f78aac26dde9ef8b33bb044d3bfee9c3b4b004bea33e43aa8801db7f58d +size 1114103 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png index 1111ae0cab4b857db53035e2233e89214af70940..6c28b1c356bbb977fad4d9072c07c3f2018dc40f 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:0d3545dc750bb260d50b561afab8dd115ca0bc7cb7942da9c173dd89c4dd3fce +size 3381048 diff --git 
a/images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png index 1111ae0cab4b857db53035e2233e89214af70940..a782b765045662a05bc0f281221b2a63267b4de1 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:a404351a867843d52a9898a1be2d767f5c3a72fa1c44fe4a3568327cf2162480 +size 2180848 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png index 93b9fa88f3e1a7375b02c308f5a2b0a14777553c..f680936bb8546bb30b2759da6ef2b8dcca322ba4 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e78bb5533519d0eaf697907fa1a97c3f666e3bb0f466296b4d718abf4fe9a1b -size 1000012 +oid sha256:a93cd33bd68388306fd5d468ef8161f95be265d61d7246c646c18e9a9ea6b734 +size 775780 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png index 1f04a9d0382a633f632844287ba63367f3cc35d2..8c51ea010367b4565b353d77fca1dd16744a8712 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a414d8cae45f95eb1287a88ce0c52602f6211d7fd86f3cd69e2d5c259681d887 -size 729317 +oid sha256:7a6c283580eda1ae959e14b8f39f7e1715077f4e67eab632d5424571f255958d +size 813932 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png index a1044f0e2e022748cf6506827fd17bcd350aa06b..ecd94df0e1100f29dd1ad219dc740fccff0947ab 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c697c2d1ae7ba89f277f775d4e60e5b034772a914553c578c1e3c0fbb89e13e8 -size 1084263 +oid sha256:1a86989f9bd3ffcf77154143e768cd305943f40298985e380fa7b8d5ae23b469 +size 1373067 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png index d56f83561f89c6ae26b45a623f7d7df1ae57daf7..3d22ef5720731f514b829c3403cba466fe1c06ec 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a42ac94d13d7ad6e2fa07394deb9142974db89dbb887517b1f0dca1b1c1e17e4 -size 859359 +oid sha256:5216f9a27fee4b10308fa7ed20bf5c3d4c6c2bbf60c3c9857c3a28ab45e1cef2 +size 1003674 diff --git 
a/images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png index 99fc80bdda8684eaef268d3220872af48e66cbb7..6614a6c6024b3c9e2fc9e833fdc527750d72dbd7 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1503bb0b6913a783e2faaf5cb43f3518cd2d26b6c7a4ad4b97afcc777c8eb83 -size 729008 +oid sha256:f8885086a1aebfd96ad5473e83c8696b70357de6b5fbd28d4b040614db5ef8b2 +size 636442 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png index e1988b4b7c9ee3859d5fe93f25665504e5b9c480..e1129aa5412a198fcca66223d4b87855c4c1c0d8 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f1d5d9c61d0dc4548c63ea1f49eafae95a39d7c753806d5e316033fd92fafb0 -size 1868049 +oid sha256:1f50c0b39c585f789d7008551c6f4f28849ff2a2cce174c419e60b74d30faee0 +size 1584086 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png index 58cfa7e1e1c885d2a665c6188baf344d72d5b3cc..faaa04aae8ae69c73f7814788b1e08c1c632d047 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2624266ef498d59178a89b0ca8124a8ebfacae74992e24bf2ed79cf89b1d45b8 -size 784452 +oid sha256:685f91ac5000a45e7640c2f2f1109b5961de9330575c4ba0c039fb9165cc328c +size 703043 diff --git a/images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png b/images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png index 1111ae0cab4b857db53035e2233e89214af70940..5fa3d3a067e85dbe1268f95dfe13c78f1f72bee2 100644 --- a/images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png +++ b/images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea0e75761efa18e3536407ba688898c339fcc5b3bc59315a1279be654bc5504 -size 1706379 +oid sha256:5abbf5483c5137e27fa0521afbdc017148b9f24d67d2e50fe9add1eaa32a2fcf +size 2892203 diff --git a/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png b/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png index 55a3692617941463556e0b245d9d63fcb7138476..20662c841eeb141504e976d0c37924c956f311b9 100644 --- a/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png +++ b/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:159d8103087e2acbf81ac63d8b744b5d1d765583c222c7f4c24199390687f3ab -size 378321 +oid sha256:f8c779f46f7d0af05f936015ea1aca5ea77e3ba156f5f006e7b91fa72b1011e3 +size 379666 diff --git 
a/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png b/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png index 244a5cc470aa4cca607682b2a1c6893ba3d3d818..26666d9eeed8ceb4ba10208c018aeae23aabc525 100644 --- a/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png +++ b/images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab23cea8dccb552b61e3fc35552373422adce224b90bc34aa6432e9c94d33de5 -size 273807 +oid sha256:9ff82ce096d61ea661db05c97166cf3ac12098a493ce651e4657e75a409c9e3a +size 273955 diff --git a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png index 502605d8a3c07299f15526b7f810847e72f20336..ea7df6a2adde9c38d3f8f70fb545c210b8d094b6 100644 --- a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png +++ b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18161c6f1b9eacce3b0b2c15581bad7833f6fda7f3d95d4e8f1ce907a33f6b18 -size 2789817 +oid sha256:b475455f36662ac8aa7b32ca206fc776139c1813b4504ac3b50e6502880531be +size 3308489 diff --git a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png index 9e9c86e9289d709b3314e916e943bfc7ce9003af..697bdc0fb8c73331aaede1caed4642248b002d51 100644 --- a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png +++ b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f9954dcbbdedf36c0799222faf76efdc53ad91616b6985b6699b83ab8b3fa09 -size 2254153 +oid sha256:fe05eddf8575c1fcb4bb995bacd972257610bd584f93f5b6e598dda70e446159 +size 1968248 diff --git a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png index c89964cb8fc2e2b62b746269045cf620236ae4f3..6c625c2b0564206e7a7ee6073c58ed1c351d6b74 100644 --- a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png +++ b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:754d57de5460740faff56fc60c4cf2b13119ab0ceb790b57ea52b73fcf6d3a43 -size 2457574 +oid sha256:bf40fc69d186480170362212f6bba35c20bffd8f6316819620a31a882c39b74f +size 1802243 diff --git a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png index 0982c579a93f41ac435f82bbbe5ec26357c98c0b..936f4869db56774c307d76d11e78f4c431d4e58b 100644 --- a/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png +++ b/images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8859f2b8f8e244a2bc514c0982e581337948961a0fe94fbc84bba8afc663217e -size 2537145 +oid sha256:c1120a6f55e6c065d5c75a9a34376dbac85a3779ea8b54198d2c3f4505c9c8db +size 2032938 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png index bd48b7f52b8ed950d9628eba1203347e15069a18..7ca8b5dcb7038157aa9c5df9dadaf9141cfe2ed0 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a7e6c12c111cadaebac152fde0d555f370590675f3ae872572c6a234b2e1226 -size 233170 +oid sha256:83e7c62857b0aa767d89b3ce25bbfb678a90df8421c3992e002bdd1993931e07 +size 232733 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png index 7e7e6527c34200210b70cf0f9d2f3aebbf557699..dd885a1fb15cb354ec8425bb16205be9cd23b89d 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92b52ede7f555c250ab49cc24cedb905bb236cf1817340056dedc9332b0046a8 -size 359540 +oid sha256:6cf11af9c1e2e4cfdecb78159d4cd3ff8b519bb14cddef776558d08081058760 +size 388081 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png index 40755123dcfa576014cf281df3dd20af561ada3a..117e4f2d4b5a28e24d4cd92d9b435005e6f7f6af 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31e67315be0c6d6231b595e887fffc813e1be16e0b0e9155f350b726a179ae8c -size 279448 +oid sha256:a728140ded5104fd705742477b660c03550fe7618017cdde327674af0daec893 +size 278429 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png index 09dfee21a780c86bca723fb10e7dc86544d761cd..e8c7105b7354a6f8aa51b8da9dc2eb94b3ac8c64 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50a32bbaac6baa2e14957be8c5ecbae78018849f88fcfed092180bde03b392f -size 334088 +oid sha256:b53103901fa8a41a18ae1a32e1efa90376f085d0c01872394117c98cb0d01d4f +size 336812 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png index 58253d4376e001ed98bae3c5484f6b46aa05845a..40cd34ef3cf9e031e9c6701c76ac382eae1575e3 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65e186bb96713dc705d1828ea3e27e631eaf62058abcde0298904b3a3f24359e -size 169048 +oid sha256:68b5e015d98d855a872fc341eb846f9102224aee27c080e1e7deb6bb37998051 +size 169741 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png index 7ed39026c4911c95ad88c7d6b0c80b0244ed6d8f..ea2769e86e87c372ffcc6b1407951d4e8f2bef05 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa701d010e85d6448eff484e9d9873b31fd3242438008f43d5bb7645cf496a96 -size 367583 +oid sha256:db50a52e52c8a7a120bec1a4f359b5ee15c3a24faa531bb4b47d12cc89b0be0f +size 371748 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png index 2c6f083f1169392f1436cd1e1eedea6ac5bca555..f6186ebf8c16f7b8e15c7708a7a4ab2ad8330a19 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a0dbec741598a7d3cf6bdc310344aeaa4d29d8f82578ba9a35923eae33fb11d -size 224859 +oid sha256:a485c0ac116e00e5fd940baa715ce87dc64eeaa6277e5ccd23db6fe6f8cf0629 +size 226070 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png index 706ca52ed5e700c127f2266b7c1c1a4a592e7727..51d8419bb5128a63861bdf367960953be9d58815 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71880ab93aef79bc27e5d24a9a63d684cebc65f3086ccd65ca09fb4f1f165a93 -size 883807 +oid sha256:9f43e3e81073d8decea9e4e473f148545bb4a62b74194c7e7a7af2230c1793b5 +size 1140551 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png index 86f7019c0234dc6ef22a46bec3de8381a296530b..24d52b87126f28ef024a4294ea9c99f181de29cf 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd0632b3ba3abc4dfc25fbb26f00830368147a6c902ba417e962b709c3121ff2 -size 161159 +oid sha256:58c2f6bda94e98bdac0fdeee044de668f611b4608629beb784dd0a935b5358be +size 158972 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png index f37f7d849c24db568f1179cb22b653e17e59e0ec..cccba412cdfcf9e370f45a5648f3d067dd75f758 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d031e551d872cbd3556228823d0d9423547c0d9bc86f6db1bf8813c964658ee7 -size 264531 +oid sha256:ed4ae74c06b6cc57b9cf78aaa63f0eb2b758538ac875fc7eaede071d2d75cebc +size 264584 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png index 562a1a6055ff3dbf337c21b08852f02c77293f74..06812f3ff1433a46856a039e362297202c0ddd8c 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b09dedade4a596eaaf3ec80d7f2177b82591a207ecbe2445d6af49271a97ab8a -size 174877 +oid sha256:843d6789d71da4afe07fb50c783c38ddefa3212b02ff3243324d07fe7ef0899c +size 191793 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png index d407c7663a86c5b0aa6de7ecf68fcf587e25c181..b24cc4dc0d1eebbee1697d64131249075c72529a 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d474e1cdeee2263c5764f1b7c6b90e04efbca5a7d4dc0744f15fef92f01e96ce -size 157654 +oid sha256:da901c76a62b413e0f94e30204a497e5d2308ac0df7dcc0a7ca24caee70e4a9c +size 186201 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png index 892766eff5d094dd291c42c34bc72610fa909390..79116364123f978cdd99b95ecb5ee61f75e5ccf6 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a4664f9f773748218969bded1bc9736c6cd77dd03de67f0b87015bef567b254 -size 175461 +oid sha256:fdb5208fbe5d00362517b5d3c459a49f47ed5217cd5b1520cb95e01acbb424dd +size 176644 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png index 4f992e3ddc2a1899bcb9eaf3cab167f9ee866023..8b35ff017ea9d264b29c7396282e8896b146f9cf 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bcf264498f1655fc250e57a210d38f173fd17fb0f7cc29456e0b94caf51f9ff -size 262515 +oid sha256:cda417ee47e8590a8c9d92511818dec52b25e48b9164e00a45e0da4a29af6618 +size 279840 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png index b9dadaa7962b724d0f3e80632a98a999aa3a1064..5f2ad2243d54ff0cfe30d7a8966c648a1d682e0e 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:445f08152fd39629152983aeb2e0a7419ea9aaf82f3b7c1537e68ac1fbacb72a -size 233132 +oid sha256:3355eae7d942fcac29d0bb3277bed743ee3497c75887697a3339de36d0759593 +size 231855 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png index 4dd6801f544f2e640a9b39039870cb0b64634a3b..7660c473865a9f23804c5d472f8881ca65cb159f 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53c569c39c87a30841f17cd9b7f7c336df52054ec5f6f58959a039870aea69ac -size 353426 +oid sha256:6e3188e927e67587cb5fb162f692a9cb0593940151770a456b9d723cf0786d7c +size 399081 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png index a3024e8da5a5aa8b38a5db23a1bf7e7d52901482..fed3084315268499a2e5033f1e6e5317c4c72968 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb85d7c43c647b81325c60018b25fa4bf19b5d656966d72cc27c937991aca514 -size 1729203 +oid sha256:c9366487290e88052641d4e200b9f9a4dbb2ecd729a6a1b2efb02850b701954a +size 1065256 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png index d717791a967ee08ea0306ac4458eaac20e73827d..6a49062aeac2496186f064ca777647dfb1b9b87f 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae8a4708d643c4eb01b57b66df0a557e999874ab4573de92a22075579ebf921d -size 329876 +oid sha256:a39d39a7c727d7f16f39e78774373a2d43e9141cb7a6f9489b42a65000fc374f +size 384903 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png index 2295a3c28d233be318f9476e8f76653ef3a98b9d..a22de8cf4f116a568e3a63941855a38cb1dda516 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6125179b9dbff23e6255281f54ce38515d2ee024817ad9ea3cec48228c08e07f -size 169098 +oid sha256:6236997b575dfc423077cc44cbe0dced658076e83daf7d2705de82fde765d30d +size 166149 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png index 47202964399e39c320d8eb03feebd4ac5f5094c7..1e9020deeeb1b7360d156b3e6bda126a61e70be9 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ca12959697882505e94d23267eb84b7c1d172e4f42353ec7475879a8947303c -size 1030062 +oid sha256:8038ae0bcd88993f85ecf175186e0660b1f089498e0efcb9b410e984a5a9d0c3 +size 1095975 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png index bd48b7f52b8ed950d9628eba1203347e15069a18..3dbb9ae1881b7720df875a9dff614eb9cd8d0446 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a7e6c12c111cadaebac152fde0d555f370590675f3ae872572c6a234b2e1226 -size 233170 +oid sha256:07c0e50bb91edd381db9c96ba13450c591b49a5c42a3cf6fab1d6b6bc7bedef1 +size 233165 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png index 4c40f48f17367f58fcc6f5f352ec1057bbf36d0f..9e4c167136d80f9c80daf566ce972ac4d36a473e 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:14c219f72d4e2af019e9132bff7bf87fcf8e1d2e402f79385e752c3eef3fb9bf -size 158450 +oid sha256:d58ea7db943c698a4a664d05e0a6bf667e8174d8609f44493032245d0633cbda +size 188710 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png index 38bfeba350fd6e565995b46ab0cad169292fee22..e09b0a4cb7fefe80526ffbdc442a86e640ea2c08 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31ff605d4bcf3c48d304268b23ca3dddac3aafc76cf343d57cf0e268e4d80254 -size 168511 +oid sha256:57f5b7ba3a84ab0df67521401a2963785364b4d1051e8abf38842c91b6974004 +size 168380 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png index c32d1cefbd3966e741ab533a806aaa7823735770..9a8839b4fd23881ac6315ecedf91d9372714df5d 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d0fa69e087b2a11c9e1baf7d5256968a4061ccd9488be4156126583b341aeef -size 186353 +oid sha256:4b4723ddd8f579ab5964383cd0ff34823f40b69c6f6c58ff4f7ef7ab80d0bf84 +size 155660 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png index 45b33c31e2e876360baa3062d6a70495a81797ae..bbe543bead020b751964308c81d4f3f31cea549d 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:efdff82c41fefd0c3371164f1bcfa11f25ff1031f518ddd9d3a1a8d657e19302 -size 246114 +oid sha256:b101e00fc84177c6688eb268cc959a402eb8cafec12344f7b0ada572659fc97c +size 253874 diff --git 
a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png index 52f063ea0857ba4d2b976a82b8c6586454dd6d4b..c7d849d5655a93c892f831f27d1459c5bedc110e 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fddf2d139658df43cc8c9cba97e3def7754f52b47b27217428b07aa8a2b0644f -size 185848 +oid sha256:d8c922119d50be4c3932c4af7fd19013c2718e28377e18040436b0d22f32c30b +size 189497 diff --git a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png index 36418da4283dd53c69c9781886e24326e0b581ce..b97c6b6499ef52468b428210f7da2b1503da4885 100644 --- a/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png +++ b/images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2516e3366b8cf45d15d087f4d129a9fa7cf9e4d364c8cdc2f90fca595e98caa9 -size 161991 +oid sha256:c22b9379a095e3cc7dbee2efdf599bba41fb6531f4d094e2a3ba7452deb71b19 +size 192050 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png index d25cfe65f8eae2c7633ce95b1ff8192033aec7c8..aba1cf31a58a7997290ed917a3e72b9db5cee0a1 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2024a379ec561d4fb3db95921692c0e55abebd501000b7e56a277c79fc452d6 -size 524933 +oid sha256:cc3d4e4bcf64399ed515c71320af6cce5a42ed0efececcb7d879fdadb13b69a8 +size 413801 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png index e7f86990961e93cbba3817fdb2e4d25e8c8c462c..47223d0110318b3106ba0273287bf8af060f7c5e 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f68233105e6a9b2a7eb2b887cc6b8b39a72017da38926b769cd059eb9df80764 -size 485108 +oid sha256:a5efc25bee24a8e16e877276f18f2d6da243ca695b7f56e3d60ea3e537fd7b0a +size 627151 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png index c44068df4f5af0c5ec044b797ba6321b1450e52d..7fc2e69f30939deee30e967c46ddee5dc334dd15 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30bc1e41d11e529177da7a8be1a8812cfacb1d66fa15f2dc73269bf0083ee9d7 -size 606980 +oid sha256:78e66da27df6a33eadd4ea2f25d86d1d1d1b38c5d5522b3ef14c8121f78dfa5c +size 1427356 diff --git 
a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png index 6d2c10abde7319809318d96d4834c7fd1835ebae..b6670755bc5f879b8a6db8466b905dc5e83bff15 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ee134af21c0653d092c06c9808febf80968c39000d9b295deb11f669618fcfe -size 944342 +oid sha256:f3788c1c1b4a7aa6b1f229915299d24cbdadeef98f22775f9716de79cf1c87f1 +size 813606 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png index 039dcb43069c800e24b0ac4ff2b04731bd92325a..5d6004714805057cd32115431d8897d5e61bbd5a 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:027be46ae3b2ffff4660dea8aff188e6303544a58ce19a3d33f36077ceb679f9 -size 533128 +oid sha256:9ebd56cf76e8c22efdebc2283b9daac0e8e7cb88435805cfa69ce29bd5301385 +size 554830 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png index 88040b04fc3ad63c6143c56c020402a688a47bd8..69e0ceb216af061652f7997e936cf9cf9c4e8003 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5347cf1a98f7a11f4d6b2efa3359a581ce5c04c6cfbf8f2547bd5c79c0d053df -size 546065 +oid sha256:9e35238a0b337276f919dc6d69b07af2876e70335cc88ba684e0fe0b7dc60cfc +size 381136 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png index 8b6956445a1bc01e98bf2774ed813c28487eea98..f2379173aa83c734ef44d59279d556e04ec023d5 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:434579f63dd525fdca38672b92c8b35ac74e2d9aefdab6074a0bfd38a0376e2e -size 554721 +oid sha256:a11581db095ec619d5783807c06d720d74fa58e28080d2a4a470dfeafd53cdde +size 589769 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png index d1b362a8f70210392b35872a8a55be261aa5c6df..8fe0838a9affa9167df3615f30706c1b66caec08 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d621c5ae2abf673420b9e80da28719e36d005d82896162eb136aed18f72c74e -size 539108 +oid sha256:33170bdc0ceb085c083e0d99c2bbffda35dca33901d86dea88e27272b14a5192 +size 381509 diff --git 
a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png index f322f25544becabb075b4c05ed4a369a31ebf7df..2423bb818a62c2d898639ab3d6d828aa7a92d39b 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af96844cc1b507f4effcbadac85f5105b66711d1698bdabe76822960831eabf1 -size 515521 +oid sha256:6d08b48a3c166c6bd620cbe9c6cef203860b6792df5d4ac4e64d44dd488cd275 +size 407856 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png index d61f08d17950c4297be482ee7354b6afb6621985..667969a4045045ed360bfbf06bababb51cbbaedf 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a3dea0e4554b332fa5bf3a7e1553143073c6bed5aa6399b642e95b41dced350 -size 556556 +oid sha256:8fe7356c2bfb12d101f2e82bc6d6d030cb8d338dfae4ddb7d94e2bd60d8817ed +size 366948 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png index fa6776c6b311f69eb68a7ec2bda145503625a3c8..836101acb1a7658713f01f36967e45d49632ef97 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:534f25ce165260b66f1260704a267664815a38171f05e86ed9421b363925a961 -size 518992 +oid sha256:13f1380e21ca673690ecbf3b661c25ec71d3d4b8bcf126a2174248151a848988 +size 570876 diff --git a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png index 6868c8887304b832f17a5b56dcb098cbb768ad7b..c9fb6395887b5896158cba7efdf864ca89651483 100644 --- a/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png +++ b/images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90ed5639f51df4fb5bab5db1d44143f79bae17c2b570d0e2602641bcaf31306f -size 550909 +oid sha256:aaa378d6b804ab8edaf09ee3ecda9e0fa9cbf46e83e670d8bce6e7407ceb73b4 +size 698673 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png index 0f2e3458ab592c7f47175beb94ce5a8dc4479f8d..0467211f6bf709da146a700c632ee385858e8ae9 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ade14442f4c3b52379d17fc06e32dd9d01c0085d69531729c5586580b0049520 -size 499266 +oid sha256:e65618bbdcd797c8d1fd95478454378d92072d3eef950e9a66f97a49976e6c85 +size 1436769 diff --git 
a/images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png index fe9484c4354514217a968c2dd2acbe7b6d1e0172..62f59508ce245c728e3bb6000d44beaa66067738 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c12df314911e4cdb992cea0f7e1a6799be86d7e75ee19d0fbe56fdd74d862041 -size 545035 +oid sha256:4245a46d148c1bd32f34049aee0cb35a84e7fdc03ee55dfce71d6b41a6ad2871 +size 1341900 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png index 11ed682a20c6d10c5512328d048bda73d273bb4a..e80b457a6771b7d614607867c4b6b875e4a61b58 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3eaa9f0491502a2ad408d079ec2bfdf1000847bb5315ebf1fd19951bb93de960 -size 1247632 +oid sha256:438072b1975943e05f92a947635e9376f63f56b80e01cd696684c7a9051a80a1 +size 374604 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png index 3a99d5376b27ba40537105fb4fc3438de70f1831..09166d3da30a45794f5576e0089629bb4f5de3bb 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9642efbc0f9977cdddb0d2231de270fad04113170c25249963f61e962ceea4eb -size 516759 +oid sha256:6a8ffd4155775eb319326b9b3eca7459d2e95365ca663be073c6e31ff6a5a3ad +size 281004 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png index d313338e6e589cd22b4aebaf37ec20d723da9cce..8954e7751530f21574eaabafdd92d66b0ecf176b 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc13b7b8fed1b1ff6600cfc114d870dfc21db9abbbad16b2ecb4a7a12b528106 -size 606072 +oid sha256:ac1e78f9ad242c7e7687bd9b51d1324a4bdd5ba23001e3c1e2d76d05e571b41b +size 1036989 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png index 5a28768056dfdcf03c2ecd7d62aaae004b4a0502..9a657ac1526066b2bcffca31fe0a5e9bd0c119f2 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1468424c578a1800ff8510a7aef3f58a440c8b96ec018784a07edaa64dba7d49 -size 597751 +oid sha256:8f05e5ffbc9b8df3501abe4397200416d271158f90f1fbd9442d78bccc734137 +size 1567623 diff --git 
a/images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png index f47dec7eaa327f8387c301c4e9237ed8dfd34aab..33f9e5072ad6868e7a0ae688e756d8dfccf73d98 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:132ea8762d1c28faa0d412407b662789e1508690653f78086d97baa560aeba7c -size 1156459 +oid sha256:cfc251e701364ee8759035f08a701bb9e1e8912d643c2e5d79d13450282046a3 +size 1125927 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png index db123d2bba461cfdf5a956be7add201ab976a218..d22b6ac910686c527e29aa4634510a2628b333b6 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:150bd767731fd76aea44bf828ad5c40f9c118589ba3cf3257f2b533cada33b49 -size 571219 +oid sha256:029042d8733ec97ca2d2b9e7cdc34961394d01e1c9bec2b8eb52ed454567faaa +size 463169 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png index b876ae1327ec9df66534e8a4f0e33405e936ecdb..f121dedd96e89123cf6d27fd6f661b5f2aad311f 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d707088c730c50c8739b6eb56c92df2cae9b9f5743a6fa630685e057a39b987b -size 473111 +oid sha256:63e3d7ad1bbe6cac6c5c9cf885c4e3c51c1a135c2d24f7674dd10fe855e16c3c +size 234899 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png index 2c9acfa62661216f926e993296a0810c2817fa18..45b914124dd66b3311e6d054990dc32ef4eaafd8 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5220782ed041e556a8d26b0adccb70b4a0f27f981be68c4bd672e2382533aeea -size 517528 +oid sha256:d967219030777fbf3db5b559d512bcc3a8d1c39ea53f478f89ef3813c15e701e +size 518352 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png index 1e981d62f9952f204768e631d75d2e87cfffd817..980056f5cf5dc66e5dd4a63bc778f14b8f42f78c 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0da06274a5a4988a75789fbe7ac6acd7a43046234a93845d32a1920482c9b037 -size 642680 +oid sha256:fc83f5aec8f30abe552c747d587be346b9211d1fa3cd09a5956e8f0ca316d354 +size 894097 diff --git 
a/images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png index 7d9b0436f54bbf79878b02717d2bb90b56804cef..0bf74457ce0e70329135be1ef576637420d81393 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fe45b22c31fd2fbe90fe8452ae9a19a83368af9b19a3080387e7a9d125434eb -size 1538664 +oid sha256:9d775a6419cb0e537618ff4d4571e96fa7409aa8e2f4a67b1c05b6c26b84e4e2 +size 1836342 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png index a7fbbf2c79391842f93122c147394edd44a4e063..16899ed4480a4a29466dba27feae2b2575ef5cc5 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c500b32a61ec972d04535841d6bd03a13f632eedf8eaf0f8c20485e6a407af0f -size 886247 +oid sha256:675c9d6d3950265d11824461a2912831d7bed6ff91151d038340965a33f59c5b +size 265816 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png index e0682506cbc5ce58bea401f3cfdc3d3ab20cc86a..7564451a419f5934a4a555a88b273f6a6f5c3f28 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01e3eaee08d95bdb5ecc0b5d05666686226f5923e213ea360706a605dba70bd6 -size 1376416 +oid sha256:80e21283cda85c28f2a099e527acfde7dbd4b7f5899eb6d358b08e212a202fda +size 1534514 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png index 56f8f3b5907acdff64be47eb0c02ea4ed7f1db78..475697f8a2712d78360db0c7fc80f75548b23517 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c4b0342a04ee329800df97a1b6c40a0874a87926352e3ef9e7a3db71a7817c1 -size 511009 +oid sha256:365d2d69ab8819a3b4bfe85b923e547b8c0aa5a5fcf423ee7640ef8c69ed25e9 +size 720739 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png index 858a50ed45827967652cb5ce94ad94455b0f8a0f..edc6e38140ea4d4cb9e753be5a812aa83e2bbac1 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11a1927f086ff32c815dfd3ffc340cf2a44be86b0b01bba33bed235338129876 -size 515380 +oid sha256:496a3ba91895f191672129deaf91af4b7752de8cdc681f4a6234495b60d93b5c +size 939670 diff --git 
a/images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png index 1f305c648242861e73d6fedd10d94e3c2756f1ab..51f1c1abf94444a35d355561167bc00ee9025160 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2c6e9cb26f34fe60ed2bcef47370bbf310956091e1841fad75d40e9481724f7 -size 1101621 +oid sha256:42876164551de006b536c0ac8059145c5f4646b3e46dc7700ae6c7f1c2d48460 +size 1678075 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png index 0dec9bb08fcaa96e3f04f57fce883b796333eff4..03a3106e71fa37707c33c40d4990913b9aa95d6f 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:857bacf26e9d8bc97e294fa680ea7810a8eab0551bfc983ef9b48ba565f55537 -size 417553 +oid sha256:0e53bc2f3d59e0a084da82e2322c67e8f77bd89233cd7196201b7ee1d665de13 +size 1643413 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png index e455d4089a07141f10f666f00f09f4315ef071c0..5b05bc088e038ba4b7d47066006b20528d828fed 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:732243b8756c958011f6121b007fff0249316bc7c4b6279976634e3db9ecbb02 -size 513529 +oid sha256:a84f5c27e6454a828fed98c50a5d1c2e77b649410b009d6ed4254f24c52bec8b +size 589538 diff --git a/images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png b/images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png index 18ea4e7b4e72612c68f8b8a80da1ae889dfdf867..4b986b29a8df6cec4d89728148b7d940f9d987c3 100644 --- a/images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png +++ b/images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a589d833ae8c5ba013d2c729c2548edabe2a2ca7ccdde4e5ebfbf00fbef493d -size 488798 +oid sha256:16b43bb88c43b26836a3b30413c4977c5eeaa3276077b6ba7eaacd4d0f048c81 +size 416653 diff --git a/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png b/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png index 889aa576d40dcda1a22d07ce757ef5b5297f9c70..39bcd72b9d33cef9a455d1fbae3475f3b02a1beb 100644 --- a/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png +++ b/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37c4c283da19ca45d0a21c51d9f5ebf3bb767e0223e519ddcc25075874b82e39 -size 696271 +oid sha256:93268fbf44e3f7166a2a9d21b8468a3aa1a42a78f01f14d3288b1e15b9c86deb +size 578040 diff --git 
a/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png b/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png index 1e532b4f91938cf8c92c5427380ad368699159be..93b8fd630d4a3f393e95e1f37adcdc07fe25ec36 100644 --- a/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png +++ b/images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e873e1ceeaacca9e711202910c06153b4ea781957fa9e9f48a2c5545860ebd7 -size 919927 +oid sha256:ac51c879d68783a26c96c6445c31b9e08855edc8d683ceca181c0c7997c71c7d +size 924920 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png index 2858d942c65cc3d6d69013da47df7213c49d2c9f..b6c4c294751c5e27a51dbc19ac1d02f83c194b7a 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e51f155f15a0c85356acf4d189b7ef847dafcc7cee01e09114751c374fe3e15 -size 1208510 +oid sha256:be48c660565d1a5e16ee1e5da6c954b1fca497bd4f0585526faa0176807d49d2 +size 1478804 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png index 7fe9491a9e387615224f32228bc76fd809c22485..7ce080f799634d00fc72c65978f71299e1f876be 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0590354912c7360fc1ccf24fa423ad624440840cd7957b075bb2f55c875cc3e2 -size 1203935 +oid sha256:529787d694d3c0b6ee722291562d6dd5bc27e50c3121a1b7f79e9d40798ee792 +size 1208634 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png index 414cd77e6fb1f8674ff5f5ffe7b480067633c01c..d8cf75ae58b9e9434aca5f1c56e5ab8265d1f9db 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3da07c9dc8fb43bb29833281faac075d7b74cf0029c9d6229f7b5575263dc6c1 -size 1165998 +oid sha256:cb8bd6101376a67048cfbafc1c888bcf00c2c9cc4eb2cd66af92305b8a6b45ce +size 2218632 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png index 6f623de384204e578b39e5b8f470d65877fa944c..33d5a600057741f4fcb4d9435befa1cd01b24620 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a1ddca4f4da45beb9f7504a1b7f815667a54836a963df7eb73e32f23f935b01 -size 534154 +oid sha256:cf1682e35b39fc02189165938a88c6675797c49557c3ed8319959321a03b1de2 +size 501142 diff --git 
a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png index a452c4e5ed126b941ad31f87f420399da5bcf65e..963df111726cca4bacd7744f7ac3a686afae67d0 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:973fd71d764fb6fba61b35c503edbebc1ee9c953850945d79a83fc8d03e256de -size 1269695 +oid sha256:50178f479ae54f864d187a4262b867de650f7b1345a47725a0f78b87dbcc62bf +size 1763658 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png index 168df4a2f2b0e8cef8fd060637ca6d1a4f8e0a09..28579222276b5dc6e73d5d903e4d66d05bd18430 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:659815683ca47ee56af1c415cb5d355f8283b3234b3afa4df45891f9579b5301 -size 1218992 +oid sha256:f9dff7b5c390644e9d36a1cfb5ad812d9c9b55ba8caecd4e2f43b1a925317041 +size 1105678 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png index 0ce421786da6c13bef7b1570221c03a2c565b963..85c9ca0243e4d5c7fc884dd24a4c8666437dedfa 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5de9827b957eb770bd87268055bbac3180431a76c1dee851f5c88b3b55d5279 -size 522508 +oid sha256:0f199b171cdf306707201324ba78b37795286f35d76ca982af1baa6077245efc +size 510955 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png index fd44713652a889d4c1161c92af2e3e4fa9003ee2..bdb6549e031b547eb120a1164f22c4610389e058 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d44c2b0891c9518c89a09214787ee6760b9927104e719608ce7123fc415e48f -size 1268244 +oid sha256:b6092c32a436fb9a25180b9f4d462785a782a501d1417d2c47a8f6a498f502ed +size 1672006 diff --git a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png index 679e2b656f34a875c8d375ba1b77909e9636eaeb..29dcad5205b4ab45063871ae67970f37788ba2ff 100644 --- a/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png +++ b/images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5352d43de351dc3997e15707740a9f5f3a2ee9bb996238ad3697d509d752fa8 -size 1265003 +oid sha256:8ac1e662544ec6e66d710a0bec0ca18363db7ae8433e27f246b82b7a7b71703a +size 1595495 diff --git 
a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png index 20a4376bf1f184eb060230a56e603d10387d9f73..dc4220c0e3e3d2cd7da24ac2ed133aebc3346687 100644 --- a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png +++ b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f5e315a1860ad18f690d098e2eeed8ebca241d457a491d2a8cd641381d8d25a -size 1099122 +oid sha256:1e5767493370547486137717f8d7887fb6d873caae844ec6cdd7ba681d91b694 +size 585569 diff --git a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png index 29be342a7de6ae3e724f1a1d280f7667bfe658cf..1dd7753d4ff60ed1d844991056ba89b27450b9ac 100644 --- a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png +++ b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9370c9517b08af588beb0b1597e957b54f12fbde77e75ff04b86c1ded20a55c1 -size 728803 +oid sha256:3aee6f662889fa30893d447005cb47871b4c0fbdf193caed4f1015d8931df4c8 +size 836695 diff --git a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png index ab2f0d44920e9686a5209752e4fbc98562beb37b..be64db945339e699eab67c87e30023a7153efcdb 100644 --- a/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png +++ b/images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:86f54f62cf17330cb4b31e7eafce014d52cbba204a378a6c288fd0b9f78b6388 -size 1210874 +oid sha256:55652aecfc9587fc19370cd9e86ab940ec2921e3b445aa02e527765fef5ab860 +size 1327056 diff --git a/images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png b/images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png index 929152ac86f61963e55ecf422946c302dee8b95a..58e8722382b5fafc6b6d792a581ed6c010e3d3e0 100644 --- a/images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png +++ b/images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55468454a9a4a7218defc766b6fd7f364cc8ab14a6402ef4b63eb97a1222bfd3 -size 655079 +oid sha256:08e15bfe10aaf93d84512e08609ad4a9166f76add07c41378d1600143621e3c0 +size 434657 diff --git a/images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png b/images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png index a9d202d34a039342bee018c9b43fb96872c11abc..5380611b326727faf99a8f6d93837c446ae7d719 100644 --- a/images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png +++ b/images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b98e584a0377de6e832bafa84fa564408f24121fe460d1fe978e632d8c19f892 -size 1248698 +oid sha256:0ee218027e54f096d0d3c766f340af4f462503d4b20e65e0b2883e38443bccb9 +size 1365871 diff --git 
a/images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png b/images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png index a1ea839e6af9c3c8fb42ae32243780da482abdfd..96266c8920c4ac3381b51f8ac08a8cbcffad63d1 100644 --- a/images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png +++ b/images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c03a4adcff26376c7112c1bf7c2cc2ef6774ebe3814d44c2b019cfa142b8379e -size 2078717 +oid sha256:ab0cbefbfd33df211a1c0cf52f7f392a5d7f2ae309989b9a8b06e38b066e480d +size 1181509 diff --git a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png index 5102f3f2fc97f41cc056aea41af97b10826213c1..30312041c854b82249809f8f77cc93ba397626e6 100644 --- a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png +++ b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:923e1f1b161a32aa885b2ca07e555284a035149a475fac967b13604213b86dc8 -size 419292 +oid sha256:3b80642ee9bd08e7d5a665b87a145c050dd0226eede35adeeccca095c9ab1220 +size 536488 diff --git a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png index 4cd219c6e2ea262ba0814c26642e645f52bb85a7..e8839c473e4b941c9e4d8b65d58c734877545dc3 100644 --- a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png +++ b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3afe36fda813869d69c1afed190402840f8c11c5767a33b9cc4fcc48f96f477 -size 774849 +oid sha256:30e0e02faf4132307a2c46613ef0fa9c9f339fdd49efc0911bb4a84ab04306a2 +size 710238 diff --git a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png index 817399306abba1c5c24aad87ec360355d051cabd..07fd5f0eb4ab10597d6ef99ede258f0f18d8c55c 100644 --- a/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png +++ b/images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:532cf59de3fed340c2f9fd0169359f410bb0e0a016b10a870eac0f3b0dd95ec6 -size 1447956 +oid sha256:91b891848a43a41bf602c6466f56849cb0be207ddb589f00f6b1fe3bc1e1f9a6 +size 1766440 diff --git a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png index 93ad051a114d94e5969d0ade14a6f349466792eb..f5c9b0108c80c8dd61a91c71775f4cc9d5e3573c 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa7f19bd4b97abd0ee000f2d45bfa5c478483074d7abd947a5cc83b88005eb8a -size 1131150 +oid sha256:e62be4e0dcd3352437d62cb5280e96dc26dee7b99849d9d2b091641b2a76a08c +size 1336349 diff --git 
a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png index 31c3c8365ccd074f2e0f70d16c4d0d03adcd709c..b7420de9ef90f9550757ca8ef030401e8ec5fe42 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:660003e21f0e85dc0184f03d3e0ed9181e6a85483a0a506e6b1e873ef403055d -size 1131210 +oid sha256:dfeab3cd57a0bf695f220fe7464e8983de1dbc1a2f5e498eda216acbbe9f19c7 +size 1292980 diff --git a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png index 6b520c115e94df9adc257753744ad13d8224e6df..f0db4bb51bbe171ccb1c93263da3d466510f415c 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f04d47839ac4918a7f0389f5e2747dbc356b8c2609ea567538c992991380c240 -size 1143294 +oid sha256:23aae518be7e0d293d2986d97d464d1ed8c033c3723d0a9de2df7a91b12c24a9 +size 901490 diff --git a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png index 93ad051a114d94e5969d0ade14a6f349466792eb..7bb4d38dda02a69f4f18997661e1d7e50d7a7628 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa7f19bd4b97abd0ee000f2d45bfa5c478483074d7abd947a5cc83b88005eb8a -size 1131150 +oid sha256:b0e53cb5df58dda0d6f26c5a7ce86f76ae9e1722d60ed561824fa9c57b7121a7 +size 877594 diff --git a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png index 93ad051a114d94e5969d0ade14a6f349466792eb..74fce727693f7cc760fd6e7678f7cd375db2edb0 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa7f19bd4b97abd0ee000f2d45bfa5c478483074d7abd947a5cc83b88005eb8a -size 1131150 +oid sha256:3b7933f9c3b9a02564bd7fac6dd7900131ad16734446670b95695b6254977a23 +size 943416 diff --git a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png index 93ad051a114d94e5969d0ade14a6f349466792eb..5c2edf01782cb414ee7339a325dac1d12ee6b191 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa7f19bd4b97abd0ee000f2d45bfa5c478483074d7abd947a5cc83b88005eb8a -size 1131150 +oid sha256:c0d9c26f05abd976b1940fbd3c970466e67cac73904803645e22fd07642ab2f7 +size 1003682 diff --git 
a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png index 5ed52ef02f4d369ff9d25c3264f55f1c68b9ec04..fe53a9eb3e8386de7ea78c525f45953101823a4e 100644 --- a/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png +++ b/images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50e58c00e4251f3bfe1ee668fa45e2a75e58f5c078f630dd2c253d30e0817d0 -size 1199721 +oid sha256:c37c872385078bc0577d736cddb126d3ff430c14fd6705a870f6baba0cb4feb4 +size 416235 diff --git a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png index bee68797102a9ed84b398dd72c1754ddb42e40d2..d6ffd8472bb329e0f5ce1f3fe389197a064bf62e 100644 --- a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png +++ b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd5f247a459573e736435ba8894213ea34875c98792ecfbc3c2ae147a1522ead -size 554581 +oid sha256:2d5cf6eb3b8014e3564441ac66fe4f3e66a89e4594deb2966ff87338b2250d1c +size 576100 diff --git a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png index ba0553f6c0420d4369f55f1ca80bb573f57a8d0e..3301a9d6e998b621299b32a66c237546f3ed567d 100644 --- a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png +++ b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87330b8a05046bbecbd251ee53f153c6f2c6b6edfb4253f15f84db0f4f99e1a4 -size 562590 +oid sha256:11f33e744815282cf2430ba19adad013b41c1289c2532315e32f6c30bda7f3b4 +size 519158 diff --git a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png index 1559e1d32fd907b7c9891d14da8cb0233b58bae3..c0180bb8327880fa81a1d05d9af0eed021b733a2 100644 --- a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png +++ b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4daf758daf4b9c502e30e0e297fcc473a3cea7e71f894fd6bab25cbb4c8cb980 -size 889101 +oid sha256:d33f3ff1ad93bc1dedc90c21bcfc2fccf9c5a07b575e09a5fd0eb4b8d201b78b +size 850253 diff --git a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png index 1d180a92a26fe34630602ff2d0ce78c6177dc7c8..c1e62138a459c3adbc7f4d6b853534c9732f6a9b 100644 --- a/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png +++ b/images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:295f34f3cb46887d22d87855d91780131141ecb2f209ac4cf2dc19f82a7ea605 -size 549321 +oid sha256:8ac17d5b6dce79ae9d35289c49f47859c44415b962c5f4b1b1a6289e98eaaae2 +size 527824 diff --git 
a/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png b/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png index a17b7fa307426e13c5ce247ba9c7fdf01dd6b9d7..82a4d9601fb9b91f4f32a60dacd79d0aa4d852b3 100644 --- a/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png +++ b/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94c2a2fd96137016094484987d7e639b85710e00ee6825e105d8fee51fa157ca -size 1700127 +oid sha256:7d1773807e090ce8a21bfc7c4a877f1cb31fbc9271f31069c4c227d9aa6da993 +size 318373 diff --git a/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png b/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png index 78ef9ca00c82ecfc90989813bebcdea741a2b0f2..ed7b30d9430bcf2cac70c1dc8a8b299b60aebb33 100644 --- a/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png +++ b/images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20dd647190d6ccf5abb8669f6f4b1caee7cb2db9544f682248a9a26f0547204a -size 914782 +oid sha256:0a1d140ebb750a8ce73fe627bf5b03996db66120e924822f11c0790724e39730 +size 570615 diff --git a/images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png b/images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png index 72df6baee5d1b615d73dec9729244f9194370951..fd4c75632fff69e0731ce6093333c2d6c607c580 100644 --- a/images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png +++ b/images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9445700e9e16574d214fb5d079e6aa3758318971342f604811b4c874001856cd -size 627771 +oid sha256:a168c6617655a430cf6c92239ac1e437248624b1205a746e528754125703a073 +size 556107 diff --git a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png index 981b299d7b746d5f5d5ed7f65e386d2b62e76aa0..9aea472af2d5444cf627b82fa6047b14713d8206 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:544f66f4d5897f12431117325c6548f1eeadb8412ffb38b46fdc1e79384e6261 -size 2360327 +oid sha256:150e5af4307cafa1e60de9caf2860dd2853f2c56c20bd941ee901233770e1bfd +size 693428 diff --git a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png index cd468bc66e499cb30a48e3cf6a98adfe653d89a4..abe264266033d541955bf1d4841100689cdc934f 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fff76726689a8ed4733705b20301e5911a4be39d2e3e4c33b3cf8220c940648f -size 2365854 +oid sha256:89b44de1534949d81da2fe40b30154e3360c021f1662cc4fdb5037f8e84a840c +size 921106 diff --git 
a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png index 77a558394478c87c0b83527f2cd4e4e8a9e8f1ae..e4f213e4b485e09ff2800f888ac9a6105787134c 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6bb7a27fa013681b0a953aa8b9b947ae0fa536390bcd41d2c1bee5109b00983 -size 943127 +oid sha256:683d0d2da1f3ba8b0f9849616bb4df4ebec2ae6f64ae31129f4fab1bf5296a47 +size 1238056 diff --git a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png index a889059cbb4c6f34412385aeb23bf2f376db6018..e967d66eb8d5cc94c60e5c625c2d28dcc57bcac2 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8ed09bcc0c8d263fabba177654775b109e161669e5671187a5ef5e991ccaecf -size 2360008 +oid sha256:88d302ff3df4eb19f301ba7de942775861678750b0620f9b2face5922e1e7fae +size 1281104 diff --git a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png index 6aa6853ea3f3c0eca5a3b54a7678286c0564f9fe..7435290937eb28a52b37a1e76b2e394eb5510b01 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da453474490b503638a335a3054c8a299b442cf1edd233b33b5915145418e09c -size 675150 +oid sha256:c2ddbf86944338278785592e7025893db05905ed6292ab192c285f8641e5cf61 +size 895500 diff --git a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png index 975747ea1a71beb63899bceb2246b2f7b459ec07..62e8c240d43b6d447ffdf407a907d7298913f31a 100644 --- a/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png +++ b/images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:726c382abb44d430a1470b9fee284bce289c73f0f53a4bc3f427d17f6e0956d7 -size 978697 +oid sha256:a251c429cdaa5e0a514c71b48d4fbb772142bd70459b2a14d2dc7b0ab1dc8c74 +size 659537 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png index 4bb00f809e2b39002edba6bde140ce360637dc12..455636a0e01f683767f3c5e14b2fe2e0c094f0e3 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b23d7a870488f68ee08d0c426beeef7a6fe1ba431885f2835f4d35e5786336ed -size 747512 +oid sha256:12d89b32951a0057c3100a4e60dc25883e5b298a390f87cb39b83735c7957887 +size 639815 diff --git 
a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png index 44285a6315dae5926cf84fe844cdf20f5a36233c..bc4611736f8bf0e5d85374a052ac16fb5bade99d 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d16185338c6a5b3947c837d4bf0eaf08916324a40809b9d492ca72e9dfae38fc -size 573977 +oid sha256:9962d1fa0c23f3d952a03bd31a1fb4a1ae7341d248c87cf052e57864c0cd084d +size 698854 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png index 9f8d11da9b2495943af60c3e023abc13438a1007..0b72db484a46a1122b24e2f84a8f9b90b56833e2 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb09b12c70a8900157bed1ddc128be6745c60cb5525a2dad05005e81c9768982 -size 574168 +oid sha256:2f4df31cd9e15b371f4f4a6760f12d7460f67d98dc6d64839d89a76d27d00e59 +size 466776 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png index 35e8532c66a4999e00b9e6a0cbcc783d474e23e6..6dfb1bb382e07cdcac8644cfc657a745857c6ab2 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c140aa3f052a9642c1c6dfd14a2c439b8eb21e20d6cd182cb0bdb26fac753cf5 -size 585775 +oid sha256:5b49a614cc07273cf83c543856250ce5a2e96c9e574bb620fc4c692c8537f009 +size 652923 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png index 699b25fda85ff4c66caf8ac79f2718a6180ce839..4b337391b6fb957d80f57deef221173ccc775ae7 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab0e2d460b9d69813cffb88fbae8750298176196a812c9daa995e7004d8ef6e8 -size 901070 +oid sha256:464eca9a73d5335991b9d04c16dcda66cf00a5e697fae2d32c2b557dabef42ec +size 842772 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png index 82de06b54f8a4ff92a63ddf53ce4ea881187230d..c472efd1887b0079ff9252ac253eb8b7da6c3a8a 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ee99992addf928c0d24ab821e26db86cbe524dff0a3b3f58d712a7c612e6752 -size 419106 +oid sha256:b5d5a2d25326c2eef30c07ee511040bed0a45f5633c387269418b182d9b84076 +size 390086 diff --git 
a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png index 842e450ea2a7c01c4528ab8c4086e72d0a314065..2fd8b3dc183667a0b8ccb003fac9b467ee6aa388 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff8554071f5249145be57b3f915c99def6fe433482939791b396a958d0efa487 -size 887032 +oid sha256:53a21dfa056c19010b6c9caaa935ac3dcbf74bedc7797ca723cefaa7f63a5dd3 +size 729159 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png index 35e8532c66a4999e00b9e6a0cbcc783d474e23e6..b7835a1a1b64b7b487c07c4f887d552688d357cb 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c140aa3f052a9642c1c6dfd14a2c439b8eb21e20d6cd182cb0bdb26fac753cf5 -size 585775 +oid sha256:f9741ce812691cff30c4a1b6ee89504b7ae28370a96195bfcbfe39e716f1feb4 +size 727264 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png index 312e5f50f56bd8d9935ec48ebd8e6619c2ebeff6..21fee7f6edc1b90b8f92f164708b55827620eb9b 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88bd7a87a03a31213ffd287b6ef805728d99c48d255214a5211da345d9c19708 -size 355724 +oid sha256:06fbbddddf76bada67de990998d6f817a3d952daa2fd0a10d7302a1974009274 +size 344902 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png index a659b8ad4b67e53b4a86983a86a44113e0ef13c7..3b99b3482cc68226d3a1e8e9d2d4b7701d205671 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41d31addfe534fe5d7eb6664ccba196d2f445f6cc4e485cd8e449015df26eb74 -size 645732 +oid sha256:b44a889ae9fd014e81d2e575a62e3007b1375ed7400fa98b7bbd7b7b02150033 +size 751378 diff --git a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png index 2725a32621893d00f19433529c5bbc1b04406020..e7babfe4553474cb5619b29dce50250aeef911b8 100644 --- a/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png +++ b/images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00e3355b6b04b4530e49ece95b9e3843865ba22a8641a4fdbd4f7d09493f50e0 -size 896763 +oid sha256:5673e4beb1525a8386593bf00acada25b9af1539f1bbc9ad4e1a8a5a349210af +size 936335 diff --git 
a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png index 914345db5ba61e51c3b063e82ad09c054906c499..1200d51443010c26d8afd2d6f097651bbef981a1 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79c39cb540222717b22bed6190ffb60355d68eb89266d8989e954119c31ae543 -size 1917738 +oid sha256:1e6fd5c13b2bcc6a6f5da34c10cdbcdeb5c36b783b0a1d026a5a86760d1b7c07 +size 2005212 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png index 74ab53cd277f7c92133fa7d9584b3cc9b9d973df..ea1d3f772f21238cc72b695eacc3fbfd2a20404f 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3520cdcef95b59cf3399510c81fa4d78d8d27ed899225ee0a253ca4ca6bf9d6f -size 2313233 +oid sha256:84a84ac727ee6457be5619079dfffd86f7abc39d9c0a195d3fed669734bf1121 +size 1741943 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png index a360412ebe91f9527e60e98acc64686b4117bd73..634bac02ef4c737d76581f9d7704496166cee65f 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6406e9000aa1906ffdde6487c8bf420b6a709cefc15dd8285d364f245ea754a -size 2311545 +oid sha256:b4a4acff40de18645fee839bfbfaceea23d597ca261e590aac5da5a7c20c905f +size 2356518 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png index 052b3aea05d34448344a560de61f10d14cb2a966..c88bf763fc142db22bc1775140432d5d197b03c3 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c196ab78a0c87bd9f968850c5a7daf7c594bf1dde705e2bcdf62b8a36fe05733 -size 1614838 +oid sha256:ce28a5a98f1ae969ec48b58180b36c5f9d2d3154889d2d9f56aec475f0b1f358 +size 1753321 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png index fdee86493f9f45a6e3f3f174902534ab823f04ae..0263d08242a153da05176268c1975e557c48694a 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:341ac0237c1278ecdb27b3fd003c42827b29f92254a35984c1534ce8b7d38fbb -size 1610468 +oid sha256:e644b8a261ebccef6018dcc586131cf810f089af66962bc97fe4631ae3d1ab92 +size 1203847 diff --git 
a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png index b0f37bf7877ad0cf7391adc2c172d6ee24c2d7d0..37f75c13e0b125d18fbe350096b22c0b1918b1ca 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e54ec6bd98feb7801b37b15899c4cab38c41d264e84e761b73b86db1c87889ad -size 1610824 +oid sha256:f4bb8dbd6e5460a1c09e5761d8b37edaf84bb162f9dc95ee861141df8e733f47 +size 1458443 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png index cbcaf35e41350d170668e0fcc1a21cbf7fe5b1cf..65cefe74ca2735ebcdbc944e112e8a74c4e9e16c 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65c16da295b96783c0197fa5593fa87343a95472c76e895c45c3c87f2cc97879 -size 1467223 +oid sha256:61219931adca06cc600e09d095f009eea7e5b97be41ce43aa9ee3832a87d8e2c +size 1459678 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png index 8236ce743a6123f779504fc14a3deba93ef51ec3..8f9c43468158c9fce15743c23e80021f4e04939f 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bfc86c6d788f66a8f2a0a741009f5626b94c7560c2f64d55de3f6a0c2df1752b -size 1608501 +oid sha256:69a0cbb354da61fbea2da30210d4254e2a2c99b1297e122c605caddef2aa30ec +size 1859668 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png index 3bd83f8f54217844d74d654c8cb4941508c7c36e..291727f4da86a3f64efeb6c825a353efca198498 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6c17aabcefcd37516dba94612a066f56c4ee52f9643bc4b0c071f3751efb4a5 -size 1660251 +oid sha256:f32eafd9e68b141c4b70afceee0eeac12392cdf5918dc2ac00aecfcd4913d369 +size 1718620 diff --git a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png index cbcaf35e41350d170668e0fcc1a21cbf7fe5b1cf..c6d4878999abc6828394d17736740920250ce9d6 100644 --- a/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png +++ b/images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65c16da295b96783c0197fa5593fa87343a95472c76e895c45c3c87f2cc97879 -size 1467223 +oid sha256:8927a4074f63396d46ddb9e0311d3e101a4352b6f85f881f8786f0a676586a6d +size 1686740 diff --git 
a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png index 35f1d76e4fd000e0d6d372d22bf09baff86af269..01905b6bcf5fc24a8b4d830640d943cf37153750 100644 --- a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png +++ b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a92ec839d6229f94ed09d0e1a1b63a3eba82d4703c8b0bc0acf4f4dcbb654b82 -size 1547913 +oid sha256:09ea287307324eef31e5aaadd6161904ad2aced92421f57a279bc25504a19394 +size 1176262 diff --git a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png index 89576f34c2e69a41035c3b6cbf5abe825a15fe77..743943852f0816073c46ebc445cc96ec3094d370 100644 --- a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png +++ b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d0a83fbe18787eb787a0b7abd3187c8ba4c7c38b19ddd488a01ce328f3b2710 -size 825696 +oid sha256:3d4a5a37e141cd8a97ec13187a93ee73c84dfe90b163d953de7e2ecd253bd7e1 +size 604063 diff --git a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png index 3a085e8e0e972c19cc16de0707a11f5390800a22..da82e9c589d5811a28f6b0e2402958400b660e61 100644 --- a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png +++ b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8dc0d11e61568efb113ed702eed6420d57ba95f0e127ef6d820fa285cd155014 -size 767718 +oid sha256:6140916108c4e16527f6cf6f4f8fb3f134b05a4968e9673368101ea7d65d8e43 +size 768378 diff --git a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png index 7c8d66402f163b72c32bbf885b914521a9411c45..21652b8b59f025642bc2b99ef803d8a9151bfc68 100644 --- a/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png +++ b/images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:002c9b5ac6d6381c22a46a5bc32711811ed35d1c1e6f270c66450c7391ad4cf7 -size 423691 +oid sha256:551a64afd0bebd6ca6a1268b53ac80131f3d25599ece664110ec2abd58a0a58e +size 455181 diff --git a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png index 19f17543954b408130e8caa8511544b0ada79dd6..ff5f2c03d2d028b2c788805cc9138054d670be37 100644 --- a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png +++ b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc296695348631b2497e7dd28279b1ee8da02dfae260f6401f51e2ab2765d312 -size 2110113 +oid sha256:6b72b06933c358ae403af03005f0796c1e659a70f61468e70a92dd15e804ce32 +size 1135197 diff --git 
a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png index 4a039d4ad7ce6c626d006a7af3491e8e1b06179f..2634f3b41755b68dc48af1e07f48e52427a8d6c5 100644 --- a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png +++ b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f593a08ee951994f8cfc558782a2f79d49666b0a0c5f01e583f19af38c658e9f -size 605367 +oid sha256:e90ab4a107aeb34aac04750fd6106ac6bd0a4d6ca1d93ed9b07b2b7d7a21004e +size 587412 diff --git a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png index 3231a4921df52f6e523c21506fedda311587832b..6f48cc0d63b93439185a709284fcd3af960bd897 100644 --- a/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png +++ b/images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2535aa2c20dbb528f85edbac05d80d1c9fc594ab42affdc0c95d2becf7e253f2 -size 2042036 +oid sha256:9204797de0ccf504ca9d783d7fdf97798a8bdaa762448ec29168a148598d4c75 +size 853156 diff --git a/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png b/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png index 3283415e1e2d7c3691df0c2a798d0c54fe729948..b92b8e33ee5e1c458c35680fb43fa724eef04a6b 100644 --- a/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png +++ b/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d69103ab76f519b5c40c4344bb09b4816dcf2bc6b26e95d88e2aa0c0167e0d7c -size 147378 +oid sha256:28cbe28d1d68aa1feddfc8221795b2047f582f2aeede0af0ed4745c87722db56 +size 77696 diff --git a/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png b/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png index a637971917ac832c37ce194e2ef3cc0973baff3b..12ed31b146a301b430a1e99a32f7115dc015da99 100644 --- a/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png +++ b/images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c166698ae1dec4d9fd0c4e35f2c72919123c525b74ca2e24b2186858f648d3e5 -size 302786 +oid sha256:8cb013a9ab0f7281349d9f1adcca79bf30bd91e69def1438c8f14cfaf3d1fa37 +size 306277 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png index f28e09e8ae1e7e1053c4e4ed6f1f3fb7e5209c78..3a3e0782e5dbf60e4dff11c8187ebc8ea5da85ed 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8f6c0101b07deb6c92980dc3c2ba4ac2d889da775abf77276970cff917e431e -size 981309 +oid sha256:f051ea9fd9a4fbb648d00e19427262e8b1d3eeedc44d1d0b7e51f7ac50cb2c91 +size 1012083 diff --git 
a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png index fd7c3f33df7d3bc04c0ea8cd2f3660a3c6895fa3..47ca94ffe468df0f52cfee9a1e987f9400da4569 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:445ddcf946096bc380cc281617d845d4ac102b5b61c40186ad24cff33bea50ce -size 1209075 +oid sha256:b1b65aa44afac615a7b47a2b76617ea1bad224a194112185dd18d5912baf2d13 +size 1329219 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png index 356bfc37c262a2b7a14dd56f6a5fa13cf817fbf1..b6106c7106cc377806c75649c019abb1d40a10a0 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbab0327ef0a2131a2a752f96bd6f812e081d31bdd38659856b12abe7ac30db8 -size 1033708 +oid sha256:312068a0c9f7250ad87286745fc71d11c11548d5038e2e5bbe367034a5e2bf58 +size 1086546 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png index a6bcb5ab720f393e5a96590fc4acc214d6c46dc2..f5203971546bb8dc2e4c9d98ba906d20f0fb6f31 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8663e038a809aa8ae70e204fb40ed46fedf59756f0706d8cc38d0cef35b7cec -size 970873 +oid sha256:35a279c5cd42be062f0761d37a6209d57d14eedf9dbc841ef769c676d76ba937 +size 897146 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png index 8d92c3b2be6f47044747608c4cb5e3960817b1cb..119c8e78939a86265e7f193b403c5392d32ee982 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef40458a7ee9b9e97896ef8d980101d17c5321f25d31f269862fcd7a3eb7deee -size 653217 +oid sha256:80db2470c32861f6cfe97157bad1dc50141407d55582f60903d154728ebbf78f +size 578208 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png index db6091eed4fa01a32c3ba77b0267368bc45f00a7..4cd2018da2c9ce26673d57448157d37bdbecd56a 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afb83db10b36ae7aea836176b30b21713dff3b5dbb63623b4dcba36964f2a446 -size 1120794 +oid sha256:c65d0e1dcf6e49aaf00123936d9acaceea73167f5683f7ef4a415df9ab035b69 +size 949220 diff --git 
a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png index 642b615597ff9c855ea4c89ea7cc0ab9902fcf4c..82d2a1cd8620dab299e74052e84d58ab4b43340f 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f1af30adb93cafecdab4ad22d30dca0155dda6330f70164a64b99c9e7b51f38 -size 1122304 +oid sha256:70cadc47a6b7d2407b7d33d4cbed680dc131dc91730843ed15feb07c1c6397fc +size 1085854 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png index a953b6f65d7b5118406b968e910842c390b6ed92..8d226ea34121550303f2680fd661da5f4b5b946a 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6112cc0ceedac6b20494e0e13214de6c63125f45e31bdb9d1493bf084aece280 -size 902102 +oid sha256:05e749311668871a69c320d847fd078595e7d646729d4fbd76e140046bf746f1 +size 1048645 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png index 6081de446d17ed36c641fbbee76af3bc8cd90b8b..8848ff2eeb9d6bda0af14b9d08200b72d80d57bc 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28be4bd9145c3288e5334e5339a31123195dc18b8a942087324ca64571a84bc6 -size 971046 +oid sha256:27345f1ff8d07dbde005f17cb7a3efb8ca3f69974aae7f85dbe1463c67ebfaf1 +size 853460 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png index 678dce6ff97e917aafe4a7b03e8c630f950bcbfb..49425ec5dbda29384495710d283a0cacb3fa511b 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c37683093387357f13e752e5c3dd8668459d7289fa89eaf5acfcebf878753d5 -size 616145 +oid sha256:e980daa92c4983d2ffb82ca9a25c59c8b89e683e097a70a83d0e7ca9d260b37f +size 1340659 diff --git a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png index d5f81e800800c4968d319272a5a7c0ae7c73cffd..c22437b3d57695ecd209e41fe66b7fe318e67c0e 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab0529ffdbc553b42feb21f07342c2d49aabf41439470144ef1edcbabd5d2815 -size 1269014 +oid sha256:651e3606126ad1b262b2f1767d28948314d59feb4fbb798dd79c72f1a6ef95d7 +size 1242143 diff --git 
a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png index d05ff0d4273b069fa3f3e3b923249a2b1d1713cb..fafc03ca5566ee45eb508c9fe90757f23034907c 100644 --- a/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png +++ b/images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c20fa3cd396e5fa65cbdce20b6ae5a00d54e1586d3774c8d481e4cef0466287 -size 1127006 +oid sha256:069ac7ab13008bba54aa3a7a4238441444fdd2f49a3931d57c2cd307de9ccff9 +size 1149880 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png index d47225e18ab2be4839dd2ee2a84221600eb2e14c..aef73ea219bfec112e40eeac8770e4263dc47c15 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b05c01654ab2ef13a03dd4b290c6e18aa94224ab771678e0530746a8e81ea12e -size 1229806 +oid sha256:ffd44a819342cdc8037259661c88481126d60b809c7ca723f506584063a1a6e6 +size 1279249 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png index 8f538eba0dcf14ad9e2542d3f3f16063a63e5527..338c15db71e628fef4f26431cb869878d9591bda 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd24c607e8b6927d11ed8266d89fec1975f79f775ba2d99a5f5a960e2b791084 -size 1764369 +oid sha256:aeb40cf4caadf63513631ef07e88ccfd7323c5c693ef071fb85dad78bf96914b +size 1762253 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png index f0e9df9da0b931434b06f64b1fa39a5799fabcbf..f251f64307c8240b298ebf316711dac6e41eca25 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d8dab8da853c46ca1fffa14d8aa092b65633adec3f7b12ca9975b1e508185d7 -size 787709 +oid sha256:6421ed4c5d4aec3c04e3cbd2220fa98e65f6c7aa5ae303e75cb9b5d006fc1032 +size 1204458 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png index 8b8acd79a4cd696b90ad9217d613532e93dffc96..ef3778a4bf20f50ec75068f5a240b5352cca7c3d 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a3aa18063956205506bb3f306db8252879b5321dabd2d1aba6fbbd8a4389f1a -size 856066 +oid sha256:7a4906eef15b2fc52d1c5ec9a57546751b30d0df52da098316294d2dab9e6802 +size 846551 diff --git 
a/images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png index a131d23ede1e16b58782d258a94f1b288a9bda60..faf8f251d92616a5f2a069b10c05f39044275987 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f2d10961095584419d48a7721871249e4f1f4748743252e62e50ace0b1c1015 -size 857848 +oid sha256:20557944a8d67d25f2bc09f635573bf29b04b15b4692cf949d9c8ae7cace6cd0 +size 1128748 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png index c3d91c78ed981d8f93f6759784e1150840223d5a..217c73b4e288ba4feeae962a029bacd229c42fb7 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f12e25eed1d7964882834d80e2653ead7bc6c75d6c6f1ccf58e250b50fe4e0c -size 1162418 +oid sha256:42177cabb9af1ebc8ee62ceefa3938244b0b4b38f01302bab6b89945ba84667b +size 1312940 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png index fabf2073f7b625102c2631cf1e2b4523c8da8e32..02016fcf3e2880ec9afff32303cb894f1313eb94 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af3f73c5b64b74d159a21d11136fa612923575d97c8f59d71b3b5eeb46776d44 -size 805686 +oid sha256:0687994ba8573acb84339e75b258c2b92222325c9e32a1792116a850315b5b34 +size 606560 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png index 2a0f55f88ac6fb4f72b84aca09005e7dee0b3c7a..7c4ce4819cc849814dd2ec133b3b1cdf54353e5c 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:129348dfb06ab5440b36cc93873c559188c12ba1b20c5cbf74f535d9bb48ba26 -size 1097495 +oid sha256:b736cc1c7b35e51c9cba4d6a2f515ad94befa8b0322f6341b7bc16e85f006cd1 +size 1310929 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png index 66686e5ab4b5c9ae35523067c0123716805d3981..05fc720212a5dc108ead5e637ecaee306accfb76 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7690d9a7c7bfcc8d3fd6a9fc6fe25338d3601d4f5babea953356dd55a72f75c -size 856960 +oid sha256:68951f5c0cc6b8671122036a5ca8501ba8d96cce644251be233ef60e1a70ac8a +size 835815 diff --git 
a/images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png index db6e90243b643e2b78806f058d0b785b48c5d7ad..0400dc01a9813f74bc6bad7f6acf821375e1f349 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38afae35fdddeba3bdbebd5d00e1e60133f9dcf8db909a482c14beb1aa08f5ac -size 1167370 +oid sha256:97b0e1ebed7dff0792711bd01bd7104c153bddf8583fa8d067bcfb1c1e006385 +size 1321066 diff --git a/images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png b/images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png index 2dc4f2805a3907d6a0584a3102a7f7dff40f95f1..9e05bd898d3f6e5e1c29628338ff71c0753813e7 100644 --- a/images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png +++ b/images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71808a2a954313169fa7a9b2c5d1f3f31beb79507d3f74c1888fdca931088e70 -size 856981 +oid sha256:f39682af605a8baf50613861aabef36a1f6224474371c038f0066528513287a4 +size 1323910 diff --git a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png index f7b4156a47e6e17a763bc8605f13be84854efc19..ac241d3e99029807d5161b5065c8371308636711 100644 --- a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png +++ b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d0dab149dcdaece911adf49c57e813c7ba6b1fca602277486e3bb398dd5f22a -size 2657876 +oid sha256:08f4224a0446df0cdd58a6e0aa133bcbf3e95153d337791705f059360808a51a +size 2122192 diff --git a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png index 65c404355a6d97cd4a1e5e69382f3a5df06a3543..aea5d2e775c6f548a9f6ad69c2df3670449907db 100644 --- a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png +++ b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c414503533587c496f4aa02c32f5ac751f65f734013d757b0f262a727e20c74 -size 2893038 +oid sha256:eda8d6e0d7bf3bc1c00bc5c17e7c96433ec3ec40a4139bab1175acea092a173b +size 2375784 diff --git a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png index a32214419050ec340dd98d4d1e34a9ef38227dd1..319104ea30b979c334b39e5e28f53822dce30a26 100644 --- a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png +++ b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0a809e6de935e80162af23922661a04d81e5e76e4638d1e02c662eae6afe240 -size 1613849 +oid sha256:5cccf30fc2c9ac42ffc212075b3be6e2d474eb553934c5543ff7fb03fecbc526 +size 867148 diff --git 
a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png index 4ce444a64e4e4d19f0baeb1a9e4566c05f9156bb..90571e427961704d42977640f473ec618d9a6452 100644 --- a/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png +++ b/images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:426464e45ec17f28f843255cac9fac72ff2a9b5f228d42232b2b83efe992a9c5 -size 2076880 +oid sha256:46b96648db4aae04cbc7c6e31ced4e1ad679ed912dfc05002a01d363960d6775 +size 1260467 diff --git a/images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png index f1ee5cb18fb0c495ec3548daaf090c2126f7747e..ae3cd0ed256bf5d699d404402719c382998858f9 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0da4038b7155ffc85ab66114330cb7edf8e8a45172c958a884d03d2c5ee3a96 -size 740640 +oid sha256:19562fb95ea63e3b92e238f9745413e526366d3f8ef660448850f9cce393b14e +size 1017679 diff --git a/images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png index d8e6629cbdb2384da4f2b391f175ae0897ccf84f..ddae12c40315fd26c7e88787870c693adbe1b248 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c40c48db785c053ea67aff11729196f6626ad8ab02ba2f9061e1e832524a42a4 -size 507453 +oid sha256:062fd401755a173028fb29d25f12280318be37a3cd7d56674aa414460a99a954 +size 507053 diff --git a/images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png index 2308cf1a56a6737f270e9dcad98a049918a23c81..3782e12aaf02656cbc4589698b35135d80d48b3a 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:597be52ea0e44aec4b9030414582e90eb86941d96c27603a0624ca64cf6981df -size 507410 +oid sha256:6cfae29615ab037e172383a05a32d066cec3a692e9a40529989d0581b80045b5 +size 471242 diff --git a/images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png index 83294749ce53988dbc522cf03ef923f69646d1ee..705cb6d3472b07b5343b589750d5df3250f5b38d 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f94ca8fc7725ada5c7b81923389c3fc91132d84b3999dafca809ae43bd8114eb -size 483849 +oid sha256:ecb8bad7bf7d26ef17ae518d7cb40e31de6ddb8a1ccfa97b0dd89e6c0db4b965 +size 461840 diff --git 
a/images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png index 16a3fc7e57f282f1b7452d4a9769f532a7f73869..64889c7621600a34bfc7e25d7805d75a553b94bb 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13f3c14711aa158770959d273a8a3bd66230ebc6d7c88930512818c936674a90 -size 608466 +oid sha256:525b2dee6f46099fa7fa5c742a40e2ef3363cfbcc118ce44f67fb536cafb2f2a +size 432591 diff --git a/images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png b/images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png index 26f46491ddee77873dd889cb2eb8dd58fce5a752..c067447dd5b57f6b56daab00d8205ff167cf33e7 100644 --- a/images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png +++ b/images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:998a5f33549fb9e8d7640b3449edaba2f7cbcdc0127731d6acfd2d3c9de8b2fd -size 861228 +oid sha256:8bd766b1532b87535bd7e254fd89f63d7e0ad71bb910ed621c3cab115ddba953 +size 702639 diff --git a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png index 85a3fdb3e1b418975caa702705e224991fefeac8..e392c6d3a11c15fe6f50994737984a4a85508e40 100644 --- a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png +++ b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bbe6d22215bc417ad1cf4d88a6147e22594440416126bcf3127d387a8a19cb8 -size 693999 +oid sha256:015b240f2e5f36f82ac36f05eaf3a9c4d736fd3c23806dc2e127836e70cefcd0 +size 928285 diff --git a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png index 822fb0ddd448d51e5c88573ea5973f0145c3c3d1..c06294b44c3f91269b41216ad0ec8c51ff7a9ab0 100644 --- a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png +++ b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73c45b527ed27ce889ff847bf54e3ccf42548b20ef3d78fa7cacaade2cf896f2 -size 1196576 +oid sha256:dd241e2fe576e64a028025c266ce25d03409f7e16a20bcc9fe1911802cf69069 +size 1199873 diff --git a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png index caebb8e1309771d8b88fcecda7a79c11a76d8a34..a4b41a39944cf6b89f74e11ac123e68ec8d25098 100644 --- a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png +++ b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f6525d4dad335c21823ec3b6e79292ec5b1662366e9d025314e4734a3420e30 -size 1336748 +oid sha256:d896f589d0db2eb57dda8f85f73bf0562605ce6f5d925c7357fc3d7192fa37ba +size 1834167 diff --git 
a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png index a8cb4e0734c0783e3b4ae7c7beddab53aa0f5890..a60c8a4dc9e552ef96a7a1ab40b454d73ac45e38 100644 --- a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png +++ b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:120b34815adfe70d9fba2afd1675712179ed843eec22fe865924591e777e9954 -size 1193917 +oid sha256:82f89bd9d7916af58d4325e2d97dbf8204a84a1586e4fbb7864444338c701193 +size 882256 diff --git a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png index 4fb28f1f96cfef14d7b5a08f6f0153b487d4176a..e99a6aea930d69da991456f7c1b2798dcd32bf73 100644 --- a/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png +++ b/images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d50ad79348e07e48bd7b93068ef8b2fb48286e4687618b0afbb004975cc98956 -size 1183124 +oid sha256:236b0b2070772d013faa4476f9d32a079be691a12f90f5802315d24325c2cd41 +size 1275136 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png b/images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png index 7f2c8291beff6c461caf4a387b5a6ea880a60015..eff2315ca9a1bffa9f79fd290ca15cc926906603 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc78565b62e7afb30899e8e98b4abe5112477a6f5f4bc1e5aa0d0f1ef0abbe97 -size 1216900 +oid sha256:fb4139344f707465484d298ae1f54f3db52bca7b8100815e135d6f2da5f20709 +size 1314152 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png b/images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png index a8f719ef9f19d36d48d8ce3a0f020259af698592..e51d31dc693b5ad787b283ac3cf3ec21eb6cc846 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e95ed51dadb76845a379a21192e179e651ccb91b0ee3e000e1b5583ee4ae6757 -size 784731 +oid sha256:1a344d662a2e05dcb23d9337e3a0aa2a41d46a857f59c4a067db4c666153a8a7 +size 641989 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png b/images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png index 1d5670e72abfd04438abb2985879ac7530e592f5..78bb590d3d0c3806c1c41f5cd3775bff8b640226 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8c4e084c8f4306ea39212b5419d2f851bfd05a05e905c73e5bd3f14f2566e7e -size 757097 +oid sha256:3dc6fbad6b2a3107c72164076d2f765d4ec1dac151ebae9ad1a0201af95bda18 +size 813867 diff --git 
a/images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png b/images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png index 1b93d3692e4d7b5667530e33d078e9787578a1b7..7733521e88ff1d9def4b1442a2686f56ca276006 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88ce1737cd1f9ca210a3d0c0dc22a4ad3e6998ae4c29d8fc3fdcf225322a9f74 -size 765411 +oid sha256:01c7ea96511fdc8156caebd0e5dd791c0413ee06bd24fb897b889c8ca2c18032 +size 969361 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png b/images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png index fed8c510afa3afe09ffd51119a8dd718d7e7acfc..260b9d230a1d93bdf622f0da088f26240075023a 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42fed5232d1f78bf72b2132572b41dca45d8d71d678662e7b81aee154df2f999 -size 860751 +oid sha256:f48ea6980f52710e9864cef3ff5e62efd8c1870dfeb190aaa523540d8e77badc +size 744577 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png b/images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png index a903fdbe763804fea635b4eeb03bbdc3cca2657b..79f7432db16cff184540965e4feb7ebe2eecac53 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d71d4b713d1d4cf4870d4efa9f8134d752e663c331ffa39b6f5fbd8395d1e0e5 -size 1205200 +oid sha256:aaec5b3f7f1d9b1bdfc01a4fbb5e84ec587bd99d149ff8360f37a432df1c15e3 +size 1315948 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png b/images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png index 234a17538d6dbf99593b85e2df649b4af262c145..932f22b58121af4196552b4c13f6bdf07efa89fb 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8baaf2c66c4ee023de6ca2fde2047a2e85c54bcb8f9a4b31f0915896efe8a406 -size 945571 +oid sha256:7f4b6a08e106110864f2f631064ac1b688e5fb29b5a36a30fcb9182f79d16af0 +size 629438 diff --git a/images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png b/images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png index b550239c0f8efdae8e9b3d653374241ada9558b3..17b11d95476c3b34838c2f1d6845e4363b52aa3d 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7318b6c07fce14ff257b93a62abf26ee3e3b0b8a1a89433c8e67b2b608c6f43 -size 1668917 +oid sha256:df6552f4811d0e028c80894fa7a68b6bb5ad131217d66218a70a89c12767c1b7 +size 1253191 diff --git 
a/images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png b/images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png index b88879e490ff236e3b3ea1de1820f11d81dd8e9e..2063fad662a86298d140d2b3ac239ce1345a8138 100644 --- a/images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png +++ b/images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:794108b0a835cf44e9bd178acd1133bf2c7e5bb854eb6b2be673d297145f21c9 -size 661073 +oid sha256:63e1c3410f4ece1afa66d8f34d80c0290b9638eb8105a30cfb8a46f5ffa1ea8c +size 930821 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png index 5986a3721b747ad8637223525c5b1e1bf4f2cea3..42e0990700fb5ff3c04a31df877cccfc1c44114e 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c37754afa3baa5301485c93c31a10de2bf978781e758d25972609f7868f44462 -size 2383163 +oid sha256:e3883a7a4bd9e17ce768a05cab120cb86551c3606f7b6b8456e7be69696fa617 +size 2089479 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png index 0cf19913ccc51500cdbec4724184f9ade9e38adb..d413db524be636165892ad987e842d240bd15d75 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c9f070e10f3916a4561be982701bfe3fc14ca0c0c31fe7f339928653aab80f3 -size 971353 +oid sha256:8cae73ddb3ffad0e302a77fb6f5fc0dfeb59abaae4b190036e336151d2003d43 +size 1206406 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png index 939c6e5258b9699ece3069645ad578e75ee4e5cf..3263640a1fe73bed23311681711effb1c4e0a9ba 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:758101a07ef07f1197812f14951b67f78f2ec33dde6fa4611a88a89e6d5b8dc8 -size 2435987 +oid sha256:7acf6711d3cfa62c97a20ebe4241f75f9a9ae7b7de929d6bf76c048e5dee32d5 +size 2392851 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png index 97f8e6cbb2715c4d5c44b05d4ca0b8ca7848c944..cdc4da0a0aa0b5236dbc5f49ed2a8ebcd6c1dc2c 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d65bf7bd4afbab4e2b64c227a1bb200bf72fdddfae167a75c2f645713c2db7a -size 970353 +oid sha256:b0735a40f2bbacc8f3483344076b1725406c774369aab41593ec2956294cc37c +size 1205229 diff --git 
a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png index d470bab9cc03a38cedfb08037272d55f8bc67783..d706145dced772a665b135d1ece5e38e374a810d 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:761469e2262b73a5a4bf22a852db9a9ace671e740213bd57eb480f73c2474703 -size 2432793 +oid sha256:394176d002d5331eaf53551b8a713cd7e8b20f64044cedd3244a82c6b90bc963 +size 1757647 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png index 2df4188a07e3d228ca916cce40b9f6fab33f946e..6004f1be2de47b3026a6a76c55c0a81693d4e64a 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5458fed67737814ea8bf2a9c5333a5ff14d2ae4fd8a2b0f5f13640a256befd21 -size 2388550 +oid sha256:eb206ea9f76f6e215d13c8675f02ee4c208204b1be8dca8ecb1506e91d74c7ae +size 2304852 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png index dffb5f46c7944943520b49c35068cc910883180c..45f9c8bc18971193756656373306524caae686f2 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a8f54546600b92915a47348ed4ba70d10574fcd05e45f41ba6914364df789bc -size 789922 +oid sha256:4489b29807292a617dc6ae8585695e0577229d5cc70b823e47a6c97cb50abebf +size 976672 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png index 745d62522b66bd807009de9506de9dae274dbcb7..d4d381d0db37e827c56d7a4d8428714b06187387 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6abf24b4308c157b60af9a8b6c5860b3a426aea245a254e4ad3f9a73b8b9b3d7 -size 2202700 +oid sha256:b959804234ddf77c38d9372ecc75e6d74842c15ec29d3e7a176f56e7b45cf788 +size 1203044 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png index e16407f67e196cfb9d17c996b37ad593047e2e52..5c50d8e7d0245f753f2a4d78fd6aedd471eccfab 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c210172ba7786477966cafb96ad8a54f4682ce2d40b0078c4b6d8a78a6707147 -size 2100279 +oid sha256:620fddfe6fde9306c1cfee5c5f18ad678d2dbcb305d9769e348d44b6a65958b7 +size 2081035 diff --git 
a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png index c3ae229ecb1d09ed47a42f37a3dba695d90e5d17..2fd9f560f28857e03f7cf39eb21faaf34abfff82 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bec62e31e64a394c31a30c06dfa45cb027d863ccddbda7e0698ca64cf7ad55ea -size 1967958 +oid sha256:ca057cea9909506d8f6d79d5b4d56a582d352424ad37d27b42ddd3fcc0abdbe0 +size 2293411 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png index 91121c1905cb4cf748742fbf43243247894f1308..3830e9d85dc9b5980ebff3e99c46ca8b4844bde1 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4a5be525eef8f87dde6c125be811a177b11f655c84b7567b740ba78ca8580d9 -size 1882581 +oid sha256:98c037ee28f8b5e12789a051394171b44c6828661bdf2cd5867fb58ebd78c12d +size 2200913 diff --git a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png index 51c98b8971ad6d66648214e4c873263af7b8b180..a0a83633a87cf59a861ae2c8367ed7265cdcde70 100644 --- a/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png +++ b/images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6cac3efdceb89c863873aad235606187978c2ba9621cce6cb1b453fc4b5d8e9 -size 2205954 +oid sha256:63aaf9a1d331f26cf2eaf188e0b1d5f6360c5398ebe0d42509ff86d127a5272b +size 2291574 diff --git a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png index 9a09ffac7719a0bf6452353860994129aab2f122..0ea2045a91d15b3060f5218c72aee61441e9afca 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a23cb1a785fd45a34f855a599103cced4d3d71fad4faae724094551a16b4541 -size 401797 +oid sha256:07a7a4688ac2fa020296e764f8658e866a431056b5a75054db60c8b5d49819c0 +size 747095 diff --git a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png index e56fba7f0912bc0ea954c6e148d7d5d3a0e53a76..813cc62a996e4ab12a77ac28321bcc78e9389651 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a0ade3fef0cc78b2c2e772d0ce802da24e1150d019f22c173326720a3c21a30 -size 402689 +oid sha256:1cd5cf0d554d522289af4146abbc4cd308d616b29e97e88278d44723e1f86a22 +size 833903 diff --git 
a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png index a4c0bbc1ffdedcb6e99a7944f9d40ce405adc26a..b342dc518a962ef9fe2b5b8fc293d135d038153c 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a014a8d008c6da8f7c208904e6df978a8e93ea424d9a4bef3283817174ba7de -size 417259 +oid sha256:b68f23ae0719298af5fcb01280f1651f03898a9ecee7e819da537e0bf8f14744 +size 849509 diff --git a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png index 94a720402973fb311b531d6495008ee94c41f920..b2f65cf4ad630e94ce4b7845c433116571a61c2b 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b07dab30d24ddd60643db188369adb82669a9bbcb06e8431f825ce0189e73b8 -size 399871 +oid sha256:3ae4880f7b346acae61df9f301f1b4190f725ac84f17d8e0a9502441e01c99a2 +size 795756 diff --git a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png index eabefd8582c4aa893dde2992667edc776d5a91c7..8eef5df618add5750d7df42c1af1f2915b5746fa 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f26410b9ab155b6a45be2741c4aa34a5af0475eb62941193d1ffe08d33a4f2fd -size 455778 +oid sha256:7430a2bfd65a50150e4f3ec7d68a963f60305319abf9cc7a3e8bc4e3a84238d0 +size 847992 diff --git a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png index 0eb6d4a4c150756bc10d86209478ab466a0eeead..49bb0aa77f31761e0a0a0e4672a8533cebfd3c2a 100644 --- a/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png +++ b/images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1be577a21431a18b2b507cf0359334e5aec1c270b0b558e2642868157765f383 -size 418701 +oid sha256:a3c2446c920ba7bc3b98dafc50ffa1b75da3f6531fd443444b79b9ff069405e8 +size 309582 diff --git a/images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png index 756dfc01cdae67c13ca6db4089cd991cbb42a25b..fc70b899927ebe88db66c0ba176d410c506bbafb 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9bc97a8c2c0c897709778f2840d630e9a1e7d9a0b49d34a09d7c131e5dbefb7 -size 765842 +oid sha256:b3544715398fe6f44839e1966e970d0253c1952451011a8cc8dda56e451a302d +size 1163505 diff --git 
a/images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png index 5c9aa58b2fab8bf1a285656f8cc4146ee190a2fa..89c9cc696f79568d815ad06cce5125e514228d56 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca9649c7b24586be0c3c96a6a9e8e1bddcacfe327246fc2ceaf238facdf254aa -size 896649 +oid sha256:d63c919fb31c3dbb26e9a9426d3153eba3033a98d10f74453e05c5e4ff4358c6 +size 833024 diff --git a/images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png index 289ae873203bc3ca704230f08674b26364aa5600..2437f9b5e21e1ea401d51041866d7f9f7bf311ab 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc4efb4a879915ce24435d05688087ee38863af3ba8f21edc49f3be015c0389d -size 1649053 +oid sha256:cb31515be2257aad4d65da01c0ab3d43d1f797bb2e2cdc9ee3451bb0f49bd646 +size 1333219 diff --git a/images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png index d8fa4f95cce949075d6008788218b29df077f228..dd9618eda6c7e11a6ff0e83195238aa8a0da8721 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de946ed5fb005742a6202b7d5c1498a38235b2092e5094912a4d0243fc61cff5 -size 793765 +oid sha256:284992aa913f398edf7b5348b252d871dc468d0c12915a5adac1fb10aca24d24 +size 845383 diff --git a/images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png index 8c75dc9e855b7de90f283e136a8d52bbe2c704e4..ec945acf05fd8eed76282989dee2ad04334ab72b 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dea41414800bd3afcaf330b4e3b5721ebdda2d0de79385da7c0a31323c476927 -size 478434 +oid sha256:343c66c49ee41c73c49064ab0593f7bf68410da9da2fbe9b8c1f8bd08a16d905 +size 654955 diff --git a/images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png index 90d0d2e624bb6af72228952bb21716d73a8403a4..fbc99c1d9fe37e2e3e0a247851c9c20ee87d0a1c 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57dc5722ac7ff0633dfd167e7b2288e63f10feb024b524c4697d2ef7e19e54e5 -size 1384153 +oid sha256:2e20a69cd0a8f0920ca9063fd2dcbaee96105a66ab6dc324ac9403c292d931ee +size 913993 diff --git 
a/images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png b/images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png index c846ebb0efe7d258be715bdef3078320bb12e064..c838ec92ef52da201cb88d666dc80526b8a48ad2 100644 --- a/images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png +++ b/images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40f9ac9d25a7ff0580b08241885376a1ffc6d29e975f4c9ec198877d8cdaf307 -size 490452 +oid sha256:bb72a22a8754711105bc0e867675fe44caed0d406e1f6f5360a2665b7d97677a +size 556802 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png index 4cdc362b0aa46208e454f9db01ef2c0d5a3f96a1..87819390ea68aeb237665e648ed80acfe7b5baed 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18f9d391f80a5dcccd36a650c268e8aab7d47a10c43529c040fd27b07e943645 -size 1273130 +oid sha256:7157fa1ec9a3cb324c67099e06d95df361b089882a7a96bcf061dc79db9be40c +size 1259461 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png index 9f34bc0bd4603a941875b6425ddcb2cb1e15d659..c889f723decdefe1680efbe19f6edf70dad1e86a 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11ce29b548ebf298ce4c87c8c4716ff067901327b910f6518539c329415e23cb -size 1189932 +oid sha256:56d0777080810bd7a4b920790b3572b3a255dcf6e94b1b17dd67cb3afc5552d1 +size 1029117 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png index 55ccc3e58592b1f6662bdee7f69215da950e2101..b0dc96e2aef530510f577c12e82f7dbf19551ccc 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:775687666aa80041c4c7ea3488c9b62c76568e1bfb4440ea91a8ab8f34665c94 -size 1288990 +oid sha256:77bc9cc1b0c77b3ead7a9e64a12ed8033a726a550364c3d4619b499e140ce62c +size 1562114 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png index 62733a270a4579d4ae5a0e06a3f2420d0b39ddcb..f2888666dce4ac08d037cb30d45baa6c0424ead1 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:696bf32def3c24f29fb83d14c5bfe50afa729080d5ed69c7539aee5afc65b70e -size 1241624 +oid sha256:249c6f5a5cd4995380e0cd2884608eaf279bb9baf86c68e31fccdac63822120b +size 1870210 diff --git 
a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png index 098ca2e5a0ca876fb9780905d9a28a328d9fd397..70135808700c976f5704c572490f38cb25365b2c 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3642d219a8e28a199dfa5e32c41bc1753aba7a37a73267c3e8fc08c49903ff90 -size 1233221 +oid sha256:320956a18a55efa41f273c8b2eb66b37e20be9df0f92db8d4bb08e998ac3aa39 +size 1218762 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png index b1debaafc51a7ca6991dce008fd2c8dbcb46347f..e5e73d403a8d80c3cd60900be25fcddd7702d4ff 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41ba244319d644a5020a5566888a89f8428e76f4adea46210129c313a912ee0c -size 1339042 +oid sha256:efa8caafd12df178f3492e4111c7f27899d16171b256a3eae9c5b1835f08caeb +size 1830641 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png index 31b96152364361c29890defb43b5e5d3091c7798..0458fc0d9be188aa23148a6a9d977c3ef7b19bb5 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f726e2959b3618e5994149bbec7148fabcc7ee16ae231013c070fb1a7080bbe -size 1485333 +oid sha256:17354174d275a4067e79a88e822f8778d485f9999b56ad83177cb8545ecea599 +size 1941986 diff --git a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png index ab048c9eaf9478dda24c267fc69a275388a8263f..0a9d52c505cae0e144a2c5f70496110388a7b445 100644 --- a/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png +++ b/images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e39060384d8e2117579687e72b3e38e741815061902eafed80c76a4af8768222 -size 1030922 +oid sha256:12d66f081202abfaab9feed78d6f9514f677d4c5e0bad7810fda7cda44fd9dda +size 418739 diff --git a/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png b/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png index 723b22b537614ad60a63d5ba18eab634403350b2..dcfc144f152422cc46e720a088112659a84fead3 100644 --- a/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png +++ b/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59694ee3284309102897a3dde1c69d788a67fcb91a7430888626bcc1451c54db -size 259199 +oid sha256:c475fe29ec18cdd3fc22edfef0c5ec0209637929d8e0e354d2a81faf6d41d154 +size 278053 diff --git 
a/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png b/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png index 015f189c4a7e7a7d5676f3f452182135b229cd44..68e826e360fdf11eb30dad8cd63323a416a18c2f 100644 --- a/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png +++ b/images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e848717739a84d673b82026c569091c101fb12a3094bdf449aa551875ee40b7 -size 2586348 +oid sha256:3206141df0e7cc311e0ddc49570f2e179170eab5a9ba31873ef5bd42780063ac +size 1964336 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png index b2372a639ea5e18ab4d46623894b634500e85d59..a0f262badce8d1c3c4957b75a6215e8b8b19c59d 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbbaeab7c35de90eef84af49cfdb8db3f10ce7fca04a41bdbc86f55efb4e9c45 -size 329258 +oid sha256:3f73f1ef0c456062e90cac0c68e697890c346c11da549c4b5ec53c47db7c44b5 +size 275835 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png index 2f18d3bb6f66c4f6bf56136cc2f86065948d9f08..0ef51e12b3bd6bb0ed5afbd3973340dd8eb22390 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74a3eaa94b80ee9074cb33a8ec083ee55d09a90caac934eb9cf6c73fc4b168e3 -size 1994277 +oid sha256:b09f700b5626a19795052e52ab5c1d676704dda6d73670eb544016b3d4456610 +size 1154849 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png index c113ed2c9799e8d76c3f55dbc319146ff99c651b..4855759053b008f6f461397a0cb2f28265b019cb 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ec4486160ec17a7aee9e44a7dbb09c68c22faed4e8697ec4757d1c7cca198e7 -size 897967 +oid sha256:0de1afe9800476ed163199ae1bdc8f3c112b32b64fec556ef14a5331b99fd6da +size 598884 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png index eadf2ef6bbc0c98f6395e168e9b68e08dcd6c721..20b1f4c1e2e8f7342a1acfa32f51d71de6ffdee1 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fccc129ae6c30b898012c895694e606b00410e69d8f6d352244efdc20100d802 -size 1477836 +oid sha256:ca108036437a840ebe5a34a6ed6752270ba59cf4d6be9247bd78ad5b0076f3a8 +size 512710 diff --git 
a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png index d1aa3b2e00270de63e905fc39427d1328d9e844e..f3cbdf0df4299dbade5d2f01613fc380649219b9 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac889a823e32207b9b7b342a6a422c6d365a5ac397ed4192bb7ffbbbb46ac519 -size 1263902 +oid sha256:88be1805ca94254dd1186ad588b0eb688e33f646ef2d0ac5a48f2ea17298b769 +size 1263672 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png index a2c8e331934cc903ad296d6d92bc26c816c325b8..809f1a9d3a63365c21bb4597ead9fa799617d10e 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6812a8cf37df0f90a3acbf482d2d13e4f507ceee982ccaad7fc1fc9fa06d5b7 -size 979709 +oid sha256:fa5c6a9fc9dbb4b9fab40474f33b0de01e84ba9aeab8e69f5b621caa61e53b8a +size 936731 diff --git a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png index 6a25c8f96d1e3ed035c4c02950bb218b8272557c..4fd48c6c00a9673948c421e8053ba3ad95d0053d 100644 --- a/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png +++ b/images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3fbc5844033bb958a3b25fc7f7f182c16d7b23853acd92b17b75fa859de156b -size 1287543 +oid sha256:1553d862b570205600db775f25d09d57e147980fe2689cf116653ce2ab415fa5 +size 906066 diff --git a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png index 5eb7bcd72ef10ef1340a942bd32e38a9c9131842..60aaf9820c9ebf3c57fd281c41e20daf1aa9747c 100644 --- a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png +++ b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e469da6602a59ed0f33196d67aff04c5c1f3efaef33f561a09210921e693e626 -size 1829715 +oid sha256:b799cd0ec46913e8b39a6441deb782a18e5678fc53e6dae1b5abf2773ccfd740 +size 1641718 diff --git a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png index bc5bfe72d1da0d02673ce663d04ab14ed9d661ec..9e6cb6be5dca36702bc1fc63c1cc84ec49757812 100644 --- a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png +++ b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae5b9b81b005663c53dade59d21bee9fe9b1d645dc4f82c501c245284df2b4e3 -size 1704362 +oid sha256:c38c54e6f0a47ac5318737f238faf46987540bb92ad6637db654cf5a66160ad1 +size 1523245 diff --git 
a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png index f9216ecc1fe3655623a98738646fdf42bb7ed30c..d474ab6efca489af1a6c1fedd5f5e37743cc940f 100644 --- a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png +++ b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8babea7ed4edf04fa9c15fb39d144709d69f5735a32af86d1390ebc01ee0622 -size 2497388 +oid sha256:4de51e245e85158b9595d75dd91335e8a5f08c6e2a3707d89fc7082396767548 +size 2672514 diff --git a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png index 1bc46f31c39809e6c401d27747f9c9daae9c2494..2b13c50ac49e8fe2d3799e12b7eab5acd69f1d7d 100644 --- a/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png +++ b/images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c70e9d8a780b7bcf1cd0c3b09adfdfcee5237e0ae1026b205ee5491144c30860 -size 2154090 +oid sha256:3aa4d96003c53f985f5214c0f6c5afbe51076d0011f00cfc89843b14723e7269 +size 2752908 diff --git a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png index ef2929cd8c55c731219435b5f43cd9299c0da138..0328de23ad3e082b1fb3d89a183a0e5f6a8f6b65 100644 --- a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png +++ b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73e9d397f60e08e7ac23d70ded9ef43253f0ecaf0a5bf155db73f5c437194f0f -size 1462287 +oid sha256:6af35d256f2fb49d4eda6bc1c34dfd0d1c880c92fdb8fc317874deb443ccf563 +size 910008 diff --git a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png index 00ff198c413df43a67086a4036db4f3e2ad1852f..eec3b4af37f9ae644c210aab8815f9c1dfe50369 100644 --- a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png +++ b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5896317f6884263576adee5e4512e529dc17cf5ceba30964df140f723ea77e23 -size 1475870 +oid sha256:4a7a70f0a43059252ea1c8ba8f2358bd48d7cb5575edd85c79311f0f6c11e4f4 +size 1209999 diff --git a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png index 1e5b4c78dcf44d7eac126c86433bad979e7b8df2..c05bdc2f88333cba60d2a9bfa7e6fdd5971147de 100644 --- a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png +++ b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a61103e942816f64336d94962b04a66fbc876bb14dacb57a77dc20f3df4aba78 -size 1824449 +oid sha256:121b1d13ac20362e1f50a8680a9d6846f79ddd29f29067fc219f5b86dc8ceba6 +size 1972565 diff --git 
a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png index 1436575f8352c9b0da7dbd961cb03eaf3a480aa5..67808be960c434c42eaac38d124e81bb47a1c77b 100644 --- a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png +++ b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ed7c27305a11eb5153b056ef5522e161cd79ea6b15d6f9d651f657e7e374d52 -size 922748 +oid sha256:99d1059b8787fd5f0c1b57774afbd2d6cff40177901baa144cb8efd2d05f6541 +size 1532773 diff --git a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png index 81d715b3f7dee48e7fb78cd53e92975fb068d0f6..ef6c1ac6205640bca72ccab293ffbfea3613c7b8 100644 --- a/images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png +++ b/images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15a48b6eefb5a1f3e77b40700284799797ecf4b60e606afe3f7f933bab34e519 -size 1334799 +oid sha256:103751aaba01c24c0a989edd48de704d81e86f81e89e8d5fe1aeb93382d7d738 +size 1378577 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png index 0b8f08e94f9262259f1e2b3f24bcefcc2001e13d..8aa5f47e3c934452921344736bd3e71033c96d57 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95688cfe14d869e30df71e9b6fb1cadc22b54ec40f5cfbdab1df1d5ce1295cc0 -size 678955 +oid sha256:aa04b30bcf2ea2f8560c13883d918d2fefacbbdca13453f2a90ccd462653bbe9 +size 1478591 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png index f10c903f23632f625a685f850d69790ac5cc5d4d..f21a8fbd1c9f7674c8736bdbde94f9020a4b6148 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8499c2320fed5712bdcf08af9e1796b2904344a74175777633bdd684c440e843 -size 3151920 +oid sha256:83323367464b012270ebf7768a63ef00658326725e829fc29eb448703fe39d95 +size 2358341 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png index 84dba27c3469e0745666564b46b652aa829f4e5a..29249a2cb17c55d7ebbd67b3b4802ffc973a3966 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8539e7c7262a74d821b13dcb1c22b4e0c1901697856b4c3b6abcf9f95ad97e04 -size 2146767 +oid sha256:3f6858eb329b69e375ed3e6757394983f1ab8d07303f0bd6b3d69fe0b906f488 +size 2084475 diff --git 
a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png index b49240a859975fc67d10180499b12e3756998cf3..b6f594c0c38a2cd68659591542dd79572bfe08e3 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:419271034b515f1c7c74a91a680c168a0e21ba97d502a0d1c8f9a53aa6ea2e15 -size 702202 +oid sha256:54e7d6be755003d4ad37dab5b23c3389112b6ba18ae725bb19310c4b69d3d334 +size 1069826 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png index 2433be623c3b45dc520759f6c354d6bf8f7129fe..7ee608cf878903a214a68498aa9693b12a5878c6 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ef086e68ed6068441e1d68cef95ad46f2ecd819c734ebbf7610222d40b3de4c -size 951157 +oid sha256:21fe73c53babfaf014d7adeb419a15d0b77f3c3a5f7472c448480110a463ff24 +size 934693 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png index 8f0651ec88042dc9f133f1cab0f0f6c88875e1f2..7c1e0e6960df8b5eb731f72d5677f36b52275928 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2a4f0489bf17ce2b95aa389bfca9cd7f06d1c63253df1aa997c7574459166c5 -size 950307 +oid sha256:8d9c74c36caada888fb93fccaddc8e0b6b3b709fb6c8d0f184fe730ea3ad2595 +size 1577346 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png index aa397b0e2b0e9efd91417990638073ac54222f66..e94d7b42a713f999809a1f593f33fe66e848a351 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2555d7b46105f425ced51b78b120e572e71eff4d123a3062425faa21878e1f05 -size 1734063 +oid sha256:2332d2b19111a66879eb4525ad5ca4e5c60b42129c51be7678c57ddca257716c +size 1486369 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png index eff604cba433a04ac9836489d6e76e6125b90672..13e46bc46ba975d27571d355e8c70435008a9d51 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5bce44bfcd9ff259066b9839a9bff959356a1358f781e9c51cb0b978e293e3d -size 634531 +oid sha256:b623eb15db5b68197eb4e6d1b5310ee09245983891a2c13ced00306b5db67a7a +size 1305735 diff --git 
a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png index dc566edbc2c3eed6bbd5751d9fd29c1ba2fafa41..4798f2f0b5fdf9ed0bc153ad762fb326bec36807 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00e73a1c02ee9848f06cecb60febea24c4d26f84375b9ba8086802e4c793918d -size 1457544 +oid sha256:746e2508ae76fc90afc163656de7aec0cb9313637068bc5b8c2394eee7637943 +size 2176485 diff --git a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png index 43f1b5ae659f50d7f3b3638531921c97ad90b2b6..8aa13acdfd93becc6ef4ac2a2ae0870f064070ef 100644 --- a/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png +++ b/images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d32268dc01f2ece3a9828f7f9433be25841cc555a74b566569be5440536f202c -size 3164452 +oid sha256:d27d3f912adc5778bc957d70efb1df60f5211530730ba7a1919d637b713bb76d +size 1633612 diff --git a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png index 360c6eb9098833a948bebe54afe0098eeae9b8a7..a400f76bab564c3a6ef324af6a9767c0bd51f7c1 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6d4552c9b470191c4aee07db9240e07310fc2017881681ae5a9e51b86e67b1d -size 1030272 +oid sha256:59431fdc3b8114491031118a87dc8c0ba261bd2a47e61ce6e5c25665813b5834 +size 1134000 diff --git a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png index 30bd76a578d08b44b9ee29737a293bebff8f2bf4..dac24a3778434100f9033a2228b03fef4d1a0b52 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e086ae9d51bc1c5ebcbd6b94abfb72cecb512dfee448c7602da0a49aa3cd26bd -size 740169 +oid sha256:f09a4c561f523aec345fd4781cc542b9dcd6b0bd5c5fbbd7982e8dc786bbb001 +size 1031445 diff --git a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png index 037a0c18020d83b1a034d6ff7de5a9367f3fdf5a..afdca503d4c878c8397aa0fdcf39882b8c498a80 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6420fb18579ca8eed3ccd807912a91173520d46a2a43c7a2a223db36f69e2d05 -size 1033373 +oid sha256:6d86e1af30c0af0b5eb92d1a7af705f38ddedcbfc443c37b1498e4c35071f831 +size 1128655 diff --git 
a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png index 14c8359711929c413016b3a0263613c6c44bcd21..1d92c14c6fa16d5a308882a3d3425d3927ef864b 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0428f6ed3d3937df00ce49f26adeea21f1b22aca1d27daa82573a200a6be2994 -size 942140 +oid sha256:ebb15759ba9aa934af3a92565abc72b91a6c84ef5d764b916d23daa5f872d417 +size 841063 diff --git a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png index b80b3bc5cadc39145807665797bebae0e3f015ab..16457805a5739e23637a17abbec3e46e45e37f32 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2285cc5835957a9f82f34ff42cddd799419bdfd3b7c76af0be228bb4d6fedbd3 -size 1557865 +oid sha256:9026aa917276b4497aebb5ee9adea2448563f6966f2744b6b6931cf73dd30c31 +size 1365684 diff --git a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png index 07008f44d6db2154a73d328cbfd9dc743e2357d4..818e9aa41b76446f5a97436be06d0578b1e3dca9 100644 --- a/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png +++ b/images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7bf6bb2ab1e6af74f8a0b6e9e2abf5b968bece30d74a829d722b312d69ff4ff -size 1846201 +oid sha256:55cb791bc06832007da22b3c19a1819b5e73e0c4cb99fcf4870f0f4249f6c8e8 +size 1764984 diff --git a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png index 2d8dddcd04f352dde62e06771f3883f5321df990..64dfc159f04491e1936c0cbf029489f3434fe76a 100644 --- a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png +++ b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd31f8c1fb3ab4aa7ae31efdb4a16fe0f25d7e50f0eb121143078b7a8e24440d -size 3146991 +oid sha256:9d46f2ecc7ef3a8a999b21ac0581121bb940338a7f5bc9e47e2fb8f20d2142e8 +size 2453667 diff --git a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png index e07c46250e2fcff87e48e9a0e7096ed43d256aaf..722d39fd702559fa3b37a577b660687e8def5f7c 100644 --- a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png +++ b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55398e9093a219125e3a3f642608f70e89c1c4919851e48a31429f857bf324ff -size 2182812 +oid sha256:89a9e001064010ffcd47e7aec1f6a879bab5761fca50de66c7508b557222fe50 +size 1553902 diff --git 
a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png index 8e77a9f1ed12dfd5a10de0b700f507485fa06ecc..aa153781df9314fefea673b4d275a3f9c67f6222 100644 --- a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png +++ b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6ca13fa4558d536f4c8bcde8a00e12d2431e6f4427d57cf4207b82aeb44c0a9 -size 2811080 +oid sha256:60f62035f8ae38956fc1aef627309ddeb79e28f4be2cb60698edf983119ca683 +size 1855178 diff --git a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png index 9136093f4e0299fd94cb1200183baacec9706515..fb3d458d531a1ca1ac78ef799ff72414eed4df40 100644 --- a/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png +++ b/images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d0c2565b26531c7b60dae8cb4bd302ba9b4652c30daa66ef4a5718ae2d5d1ad -size 3150637 +oid sha256:e8bc2f7076eb25c02a007b321fe068c1fb8e22f7ab2794cb3f871965532e4024 +size 2482795 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png index 4e42dbb53dbc018718f19890f5b6faa035bfe6df..ae538e586787fdd0c8b19ed43bc3e5f530bf69cb 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:512f2eb4fe3befc997bca649159b209a74a1b160d1e7be38c3bcdc493f135a6b -size 1560539 +oid sha256:409ad35491b3eeff99c8c7a39c485d497e7a81b9abf393a5570655317bd099a5 +size 1541420 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png index ea4259602941e34b12be829541edb395b88343db..6dfeb7e544c23e88cf8b7b8552dcc1c0ed222723 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e43bc3a06435e372f995c440d034f2a8648e264bde8eea1e2b756f5c85207e86 -size 822807 +oid sha256:719286adaaac9d3b65158e08ccaa29f0ac1fc103647a62398c1056b0c8b9c111 +size 758689 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png index fed877ecd0220138ec142f793d30f73234f60762..ca19d94f55f11c2fb9d98204307a629874912c0a 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcba0b669e39b06d47aef436f0881fa4f828c2680e3db30fdc1d09d35824c9bc -size 1185662 +oid sha256:52acba42c3c7f717727b95be0cf0b384702ac75502efcec87231c4d86569edb2 +size 1774382 diff --git 
a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png index 730b0d2842099493ffe0780d5f1ac581d9eb5cb4..3cfbfcdaa1256c55683037940f65f785d979486a 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f1b426e3336b8cdd3f1fb46b015ce7cf02917382508a337ce5e5d1447bd033e -size 1257184 +oid sha256:f5781d36d52f5e00bb33a81d7d869560c2f120c0997f1696a381622ef72ba6b2 +size 1243702 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png index 0c5414254a17239b35a1b406182b0de5673457b2..a0c0e2c8fd19b9ebb99fc7562a027ce354703987 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e755dce21a669f356e2b8a20f59abf73b0bc1c91074a5121ab05e5d69863888 -size 1065905 +oid sha256:8ab790cf4585ec4d8a926c5c310429bf0d63b5cd973a985ec969513ae2eb8ac7 +size 1279606 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png index 6ca8921d2da0fb04ca6122a0c180f83fa33b87d6..bae122687b8c49bf4c148b946b59eb0b1d42d30d 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d08af4165f6c582113b8662a3c1c8ebbb9f17e5f4b3fd043e5415ba65350e420 -size 1613990 +oid sha256:861fead7f54259b0c8fd4dab5cd6032f87ac59c45d9859d8583bdd16d21d0696 +size 1591384 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png index 73ca2b3361ab1345e2ecd2a4cfc61651765e602c..4a0bf18530d24d13d65ea677db91ff571eb732e6 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40008bab18c9a5fea9293ff6ea28f7aba442eaeb22066d68ff6d047a474fedd0 -size 1597125 +oid sha256:c24c122d026ef0ca49bfa974337891456c15b571d7b0672d2b2e911bea53e677 +size 1119971 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png index 8ea9af43cf5a3aa0a18fe65d3b02afb74bc5ecbe..15daad3452d82b06b652ec5fa56a6846ee93b0b2 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b7914f48b4d68c86b84243344d684ec444f83fd0bbeec09dfff6daa66ff2bc1 -size 1170352 +oid sha256:9509e4b2964474563546a7f844165f4b338c5b732fc9e0320fbf20a04ee404f8 +size 1092543 diff --git 
a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png index 3a91f9d91a23fc036b6a2ada8046bb100949e86b..5207cdeaf03f594ba723ce3b7c73a0506e71159b 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b151be4e2adc1856957eb153fcf995686b5125161f871539ad2bfda397eeefac -size 1147672 +oid sha256:782a491ce8c6eba2191595c43f085ef39d6b50005996263b611e1801d1443d50 +size 1094045 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png index 1f4062714e387d74c3d2187331070e6e20411a67..132c690efd1b3992bfa502ef0110eead1b61db2d 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a82a398b771e065282c886c2c3e404547e3278096b4844432b96c3fa46735eee -size 1805480 +oid sha256:dfc5869ca229803fe977442342a08f5757c1f1c486e9db4079a4bb330d06356d +size 936615 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png index 565e9a0f5b27159a630244090038bc51471b4098..ee21b2e718bdb5e3bfcbf05fcf1b6983237917b8 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:894cb3588f930424404a5476c87396afcb915afa08c73d98a76e0d9626a11ec9 -size 2018691 +oid sha256:f3b122001b40a82ee05496045c67331af5917186f0c6df670c68697c6205d7e1 +size 1549565 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png index c2af509c0f03552fef97bfcdfa6f9c020fc0e65d..c1cc1a61deda42bd49765ccd257dfa34520a38d7 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc34fea3b3ecd229b62d3b2f4a8000ba4307a29bb52f05aa33bf6e2169c2b09f -size 1710817 +oid sha256:5bb6f736c95ae747e4f4143fd701ad27030721d36516fd78369d866bbd636351 +size 1066337 diff --git a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png index 7ee5af54bf502a3d1089d7ec359fbf2063f74fb7..c7fe453b1753e5c2705a67610139e54bb2b6cff1 100644 --- a/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png +++ b/images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e26e3027161807238ed2b3b07fdd14292f3c1afa65d2f78b38dadfc3ab13864 -size 867062 +oid sha256:f8de88207fdfe10e217f2504fa0ba3424e90c1952ed66328b51779c6ca1ce18a +size 978158 diff --git 
a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png index dc58bfff16316445d38ca67e338ae987d5e687a9..fee51e2ff6f1eded9d5ad2ce3c675dab6021fc5e 100644 --- a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png +++ b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e5559605f575bcc2a8ac9f2d8280ccc488f6a596fae9db2ca14a7bf25d850ac -size 1618522 +oid sha256:8d65c66fb9d34f69ee92a85bff681227efc08e01cd2fcedd1af2b5680d5f585a +size 1881930 diff --git a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png index 2f7d81c0abaee37f92fd48148fcda861a7f377e1..f644ac580e04a8abe582ac9a32691fd66dafb3c2 100644 --- a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png +++ b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52895785b4316789154f3832f6d17324d6fc14b33a7252598afe1d1a1f32237a -size 1989985 +oid sha256:8189641bc95123cc33e807516aaeb8b86d3aae0d748af76d76f8d569353f74f5 +size 1938302 diff --git a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png index 4310e594b6e536a9dc99419fec7dfa8f3c401add..70b8ffadeb354f9d0587756072eaa0e5cc6aabde 100644 --- a/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png +++ b/images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:defba66b6a2394fe795fad68ab5e417c4b69cd2a9f0688bd610ac081cdb83e3d -size 1423728 +oid sha256:605e2493a224e761d08de012851c1bc93906b10b4e235479721a30dbd8a86a92 +size 1416494 diff --git a/images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png index e6ecfea7e190aef17a2befe7e4e243da9c5b4f34..d7c04e16260e72cda191f8b67663a2a2632d1446 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5051679b5ed594bb257570b621d67673260d132071f25b2bed5e9d846a372f3a -size 555966 +oid sha256:6a8d2c498d4ecbf5bdf64f815bb140049d675b8984f741f1110b7ce961b10096 +size 518164 diff --git a/images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png index 7b17ba9d097ebc0f288613d75bae30ca76933d65..be4053d5aed458125c38e44609d7a136d332c035 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96b7c1a12cae18d4daef5e5f11e246d95ce61b633b38e5a6596ad4e0fdd9b2e1 -size 538243 +oid sha256:b1bc2bf26e1a8bc5013780503386b74615325fc65cbb90a3cc43d22b8c0be236 +size 538620 diff --git 
a/images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png index 7b17ba9d097ebc0f288613d75bae30ca76933d65..4dc727804cd0dbcab26893fa0c83f07d4071c419 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96b7c1a12cae18d4daef5e5f11e246d95ce61b633b38e5a6596ad4e0fdd9b2e1 -size 538243 +oid sha256:1580f324d118c6900e29f69b24e1e3b4c22d1c8a374d7bb50d11ba0bfe447233 +size 521709 diff --git a/images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png index 6b6a2bf779e2834bb4612e39a145a7cbf1f15049..f955d090916ebb581b9d104862a4baf628d2f818 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc68aea7f58522eead5ced55b8becabd85bb8eb852046e6ea155a005e2db4fcc -size 783652 +oid sha256:a74ba16f0eede9b26acf4951dfa605e2a24f69982a418f2023b8e69632b3362c +size 1066367 diff --git a/images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png index e27ad899dfcddb4b15261ae7e1dcec730fb2f1b0..b1938f0118e308c17c85edd2d1b3e40c7d3fa2dc 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b43141597b6fdc90825ec3260a165461e3a741b66456700668bc15af099be6dc -size 1363272 +oid sha256:d310ad6667a9dba52debb9154c417b5473581a8c85b90ebce29f33f00efc64d8 +size 1358342 diff --git a/images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png b/images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png index 7b17ba9d097ebc0f288613d75bae30ca76933d65..6ac0aa707fdc5704a9addf0bde163a5b341a9eb4 100644 --- a/images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png +++ b/images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96b7c1a12cae18d4daef5e5f11e246d95ce61b633b38e5a6596ad4e0fdd9b2e1 -size 538243 +oid sha256:ba8c58ae074a91aa89d7561d7463b09e86775b535a8d522a3f56885c7caeab70 +size 523918 diff --git a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png index f815fa6b620fbec71808b49ea84965f7ddfe8fd2..6e5fcde32e92c979c905a2e632f2e1002cbef1bb 100644 --- a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png +++ b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cfa3cc276d18c069251129d1a0db49c9d90bd0ee9e236ce16e79c8b8495d84f -size 691545 +oid sha256:2c09bcac6ec6dc4bd3c19984eb16087e63f2ced60987afe9e5429eb821ee291e +size 682159 diff --git 
a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png index 76d4c215d640c8dd658771ded90a71e00a8baeea..117de41448401d1fbe2b7a08104569a5fc5f798e 100644 --- a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png +++ b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fd906b5dc2d03bb4e255f24155ec66db0367c6c597046ba8698896ae8a61618 -size 805269 +oid sha256:cb86ccd3eb598da2b34bbb3233c90badeeda1fa461c138a596e17dd7d628695b +size 825352 diff --git a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png index 482b2041becf6ec321f048c7ba0e6193ec74f34c..06d3e1b3af7e924c677ee430fd9974c4578362be 100644 --- a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png +++ b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03d49e0a136ecbffb3b3eba8613f4eb9d543f1fee011c23dfe2ee8bf67aa0811 -size 842548 +oid sha256:da331aa45cbd10878760a1ccc1b401d7504d346f829fbab5e8956475a7f37e51 +size 727360 diff --git a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png index 858106020b07454be966a7e91e7734a7f3c3fa23..30ebe312ae31b5865bb6df02ef47c538bebcc92f 100644 --- a/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png +++ b/images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e51434041f2a40c7e0a7526d3eea3adb725b581fc83d8ad31221c979bd53f11 -size 1046702 +oid sha256:c8c99aa5185b91a0ba1091b96dec6a808929d7a5649fa35573339c914613c0f3 +size 1011256 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png index a4032e559ad01cbb792b26e968bbc515fe742344..b4b45dc41380d1a1a175a4cef1b170ab3a55c6d3 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2635187740105ddaa490eb72a8fdeeb94accafb1ada729125811ff9115bf7aa -size 966772 +oid sha256:dd8fefb9bfb0dcc050bd719cc0a6dd0b2c085c97f5411645fdd4a3651aef908e +size 872183 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png index 8d2dd4e39f6ead496af7815bd58672e99b3bcd09..aadd0ae90afe676a535feab16a34babd795658ff 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32ad3af14e68e19da7d0da2fb85799517cd6762bede6b4e277b92f8c5c6a3b15 -size 1139052 +oid sha256:a0c288c971525d2f6e26a3f288d56c79894f70c94e29669c73fdc94420441337 +size 1145832 diff --git 
a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png index 4695ba38f7d5f4fa2562da0d1545840040c4a81d..cb9226dfed3aad4c995e174223e0be0d86b69d1f 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71fba6971625fac29c8d7b4f2771c92fc2cc13924927f95aab0fae01a00c5a3e -size 730353 +oid sha256:68b7fbb7c7fa397c5df1b272be5a8950a421d967a70bd07dd952e925cc20621e +size 884219 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png index 7b1c183fbe5a324cf35e1f1d3db8a91f67a291f7..a16bf846eab6cada7d70fbb657326c428e90964e 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4d744d4ebaf801e31da2225437720ace5b4e2ad86ba0ff5053e4e1cf296b0ed -size 1122836 +oid sha256:5f365b294f7a4aad37d254e18b5fd006b01c599573ce0a93cdb7c5785eef5ff1 +size 1111781 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png index 63a91949c7b1da510f527e5b4e9ae291d91bff07..c918f163380b61b7db23b16702269da3ffe278b7 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28e0f8f13b85662198d0747fc5c59df79ae860ba8587003e3acb5847ef4d6c83 -size 1092225 +oid sha256:147b3aaf9499e4d1c031d495d1d30eab71966915dee688f6a7bd1631ab95423c +size 947016 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png index f8a2e69719df11ee57ee17578b543d606159511f..ff9ac9367b13cba2a51d1c4e288ea908590ee746 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:634e99380154ff8c815a1c72e24a52b0488cf7cfbc3edb4d972755fa6139e280 -size 963734 +oid sha256:39d11fa582152612c8721ac5350c299a1ac55b85206aebef1581ee54caac9e5b +size 909022 diff --git a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png index a5d63780f1d0f920d6ec611ca94b150c466708ae..6d338d647b1fcefd8c9e584c785a47d8e50596b1 100644 --- a/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png +++ b/images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f7fec2eca8e1f7bfdc040dbed30ccaffd072f6bca42ba2413d6b8a525165301 -size 1828731 +oid sha256:ec3a50cdd74753f6f75a329758e2ffcb854ca859d042da5fd1261fd8504e2569 +size 1461458 diff --git 
a/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png b/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png index 8396b1a2e7788bc95bc461d5e5a7693a874b4a75..e89cfe02bd845765b522754eac8a80479a1c6fee 100644 --- a/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png +++ b/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd3d8a3b650afc7f21c8ce90c649a36ab0301ad5af044a29cacad62dfaa6ea73 -size 800245 +oid sha256:a8bb410f74c5494ba74e8be960abb2d6adced386b0af804b05d7eab4954136f8 +size 1476923 diff --git a/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png b/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png index 3556780974fa2a09135f3782766453f5689b5e0c..97dd556a789d6718a57eb93eec48137fcf4a78a6 100644 --- a/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png +++ b/images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54750c1aeb3c184294bf9b7b6c65848ef128ec517ca96393cd31a4986727c2d6 -size 217340 +oid sha256:6cbecff5201897f139c1365aee9e44f9f59ce67441ffb1690a8d28826462f518 +size 578291 diff --git a/images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png b/images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png index f22d017eeba9c0e4df99bed217c237ea36a52bcb..d025e63465d287eac07250e73bfcd452f07bb7b8 100644 --- a/images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png +++ b/images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b42ec2a3821cfc7882e5b0fa15b3c953db16a15d14a83ebe71cc0dde04d66d8 -size 956678 +oid sha256:065c1b43eb93bebbfb81e1ab1b8a1a1a7d2a4b7f758767be59c856c52cc27a6a +size 927968 diff --git a/images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png b/images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png index 7e6015c04a4fed58f487419a6df80855caea2d45..24d48ec96d5a46cdc1ed4de4389fdbaa4e348271 100644 --- a/images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png +++ b/images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:418eefa275d8e692a946c6a361aab9bcd4adafb464cc3e65cdfef693a1f58d30 -size 1682022 +oid sha256:cefa59b6166bacb180b0a0787018978c97c8614c7cb4e67236358ec308a1999c +size 1283266 diff --git a/images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png b/images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png index eb2707431d259ed00531348cf1e80cfd498220db..ecf9a3b2b4170d6872631701a2f074430b507337 100644 --- a/images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png +++ b/images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b23f1153a8d8d069f91ef48cfec87347381edc8bc7dde4c963ebf2ff0a2d61f -size 2178915 +oid sha256:a977fcebccdbc697b91597caf59b4ac969e3d0b675d3b60b3d0635d478c2ea72 +size 2113151 diff --git 
a/images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png b/images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png index 9422d8a0aba85f6e8be9c3d49a82a07fde57e2cd..a439e3604b67f6b5089607e57c3283f4d7de4a90 100644 --- a/images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png +++ b/images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9237540e3dc06e0950e620cfc3228847fe99fa6b2b3782290b73f0ff642cbc09 -size 884370 +oid sha256:0793ec748b1cab1863401dcc95520d22650ffa375ebcc10ac0109dfd000569b9 +size 981487 diff --git a/images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png b/images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png index b2f8cf9b809dcb99820cfd8cceccd8b9fe5b32ba..40b989fec1c3839b34e2ecdafc4ca692cff6bb6b 100644 --- a/images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png +++ b/images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01731cffce6093ee73041fc513807d23dbed285458db8eecec15b35667298bf9 -size 367205 +oid sha256:2fa60ef68e1e090ae23da1d7d05dda74d1cee3bb868d4f74f61e31f8c6f616fc +size 461994 diff --git a/images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png b/images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png index 0c8085b8797733022eb4160396643d201b3e6272..c71be54013f9df0f0dc09bb6f09e40aee037a83b 100644 --- a/images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png +++ b/images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d517d48e07d7999ac590f69e429576fc60c76f7531c62b865d305fd08b5d1fe8 -size 440215 +oid sha256:42321bb2d74cf4863ea0e6854cdc64729203b1ca76a8aa4ab989b8b349c3cefe +size 343724 diff --git a/images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png b/images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png index 071b3908183e329f76635f215db6348b62a7aaa4..1750aa829f12e7d629b100d9f77914f2dd8dcf17 100644 --- a/images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png +++ b/images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5df55cfe7d4cafecbe3ca0130fa75b6558d0575cf655228c3ee61df62ec97025 -size 968040 +oid sha256:2f653c6090ee723803d790c584b6c41ce7ed3dac5b4ad68e69a1a04149e94558 +size 770465 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png index fd47bad66f52db84c7e7f19455b9ce83f7c65ee8..59d50a83bd0ff984cc363e450af55ee6864248c4 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1126e65cf58ea0664da5c8dc9208dc63cd422a50db1857f78ab75711c47abc46 -size 1181312 +oid sha256:2ec9aacbcc9f39fb145bbc4c9b5ad57e8cc5e1fd06473a99d02c4faef2ee65f3 +size 1172611 diff --git 
a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png index a6a1532d3dc921b46ebb8cbbb7e5f410eff5b4e8..bcae4c2a72ef9def7e413f40742417e82371da29 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0de07c39c3a7cfaff24d47f8af7855d822728154154187dd183aa39ed059575e -size 2662455 +oid sha256:b70a563786fabff3e45e6f3386ddbdfd7aae1217a2fd309fe36147e1058c386d +size 2349978 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png index bf4d9606cc7f377f424d5124a9cf15325d7eda01..1b2c429492221a8a9ee776ec23762bd1eb0b1e94 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bdc89bd0f1558db78ffbc2a593b67f3bc0c55c02231df50b127b4a5ac1d9d1e -size 707597 +oid sha256:69fb0f63e5477daff7d6f947da3e549b83c7a9a63bbeede4f10e1d3d5fd705f7 +size 1277431 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png index 9f44e0dda2d60925db2f3354d79893bf57562440..bb26569a59aa2db96015f65a4cbdd2798ec626ef 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5076bc89328f9587d8c5e66dfe5c16ec1d3c4898094cf8ea0ff0f1b4fad996c9 -size 2271157 +oid sha256:4923d48249712e50be68334ee66e058c3e3ed74cadb9205b31e1427d42dd7909 +size 2053940 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png index 98e13ea86de834d2ab288926351a2439973c205e..9f5820a1ebd85185323480551217932fc87b8a0b 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd05375e57e9fb7009c8b991e2034508cee91b5fbcff1c1e65f6eebc9670d7c3 -size 1140266 +oid sha256:9111d3addb005e6cb5e31b594fc8e1c32d0463e6f91dc5ece156070c00cc3bad +size 1152420 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png index 3d51116b154b7c7579c7193828e2379145adfb9f..de22c324c2bcfde385a23e2f689697a97e143274 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23d3045c6b231032865d8bc4329c8add6afcc2b1914513a2ef1ac3a37e333799 -size 1158277 +oid sha256:88c59f8e11f785a45e0f51612d142126c49830901070d67309c4a4a9294caf5d +size 1191600 diff --git 
a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png index 86b2966654d3e42e0561721dc28e62d5552e84d3..e35571455240c1ff52429d7ec03ee67966ef83b0 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81726bd16f0a8a67bf1794a163348571fab918f1dc403b52a713b8264d525c43 -size 1806721 +oid sha256:a262ad27d6923d75a1b8773d5c9158f5b8d50c18584649498cd56b4b24805af6 +size 1152543 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png index 5317c9851650c63d3cfb00ad1f18ef2a236467f9..f28c5e1f411f04a711f2bdd8493e0912aeab0eb8 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21947b7c2f5cf7a50547a16a3b2677e6f83532a68ec60358de8288cb3029c901 -size 1294139 +oid sha256:d38cc2f8d05ebff3c41154c03e6e5b606c17aaf6c7de865cbbbfa11c1fedf60f +size 1300572 diff --git a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png index f9186a0db1a1be0288a693f20aaed4d9e3bd09ba..6d9cf090842c733cd8510e2bf388b5d74a5c695c 100644 --- a/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png +++ b/images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0e288a921f8a31ae760186969535718862994715767b29008c622b460f237ae -size 1216558 +oid sha256:0bc59e2c176ecf548539d178f247ca9356e24423c0a49f7af34464cf8edc20a4 +size 681803 diff --git a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png index a9c3a96322c80eb9075d1f6934eabbcb8e62ddad..ad1bc57186de0b25cdf1781be8ad18582785399b 100644 --- a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png +++ b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b223e8aad9078307188d263b4224ae515cdd8c52d51828fdac1ff17c5dc68b06 -size 1122230 +oid sha256:2f73beb32eb23692fd0be0db5e1636aa0155574fae072b2438324efd4d1dace5 +size 620351 diff --git a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png index 10b501a017a4433157253c345929732ca9f0b137..80b3e5243c3b941cb6096944f7bbab3492f66bc8 100644 --- a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png +++ b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2bd172b2675354840ae468c31d6e269211683a9ad12d696a890b6e6ee27bd0b -size 1654548 +oid sha256:ce76338bbb021f1b6e68d400f0549bd77d58154321889c591ff1d6def68b654c +size 1831842 diff --git 
a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png index 98a3dacf5aece41211b10aff130d2df8494576ca..b86da7696ccf32c03bddedecbedb834d24446b9d 100644 --- a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png +++ b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76c7a3568a8d3720deda72ddd772afdc776faae87e9a65bf68a2a656368665ac -size 532170 +oid sha256:f581660c45afafb5aac4366cedeaaff10359d24bbe07b61ee368b5727eb45d28 +size 812151 diff --git a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png index f9b0719d8eed348aa1152b3fffb0559cc73cb4c7..e0f283af5a0c7e2d1912cd6615c4c41cfd6760b1 100644 --- a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png +++ b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4edbbcd027792c4d5dbb613642d5d2326e819720abf2ac16e4883d3ef31c0539 -size 1582307 +oid sha256:1f46d5a83ee9e1017530e25e1502a4883eb287375f075b2dec3dc397bdfe48ed +size 527280 diff --git a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png index f04a358f3284f0b7055a0cb42c1f6f48cf6abb24..b36ae52ce1dacba637bf6551cf277aff695e9739 100644 --- a/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png +++ b/images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9dc62c944512cd9f514aff31d2e0935ad0c75425b2d72db83deacd87bba8188a -size 578857 +oid sha256:8a6ac437232edf96c21b08be98fad4d32857378b1dcdec85f22147b4f8f536a1 +size 399200 diff --git a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png index 2a5c5f9c7279fba25289f549aceab3cad8288f63..2a5cde26015dc1d2c8668e37ab93e4eca0eb2856 100644 --- a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png +++ b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d7d5c7fc8f60ad8410c2db2ecb16a0132fbfb0b2d2a78144d65250a779a7d4 -size 1485845 +oid sha256:70cf8d94840797b83b3e4a3e9acec4dd47b4d281a8f4ec2afb004bbb9ecf7d7e +size 1454829 diff --git a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png index 2a5c5f9c7279fba25289f549aceab3cad8288f63..f98a349c3d471bd3bc1b2dcbbbaa96882cb0b3f4 100644 --- a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png +++ b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d7d5c7fc8f60ad8410c2db2ecb16a0132fbfb0b2d2a78144d65250a779a7d4 -size 1485845 +oid sha256:89912f20060cda4c743849678979772e340091a76c33954613fbdc2cee981137 +size 1924733 diff --git 
a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png index 2a5c5f9c7279fba25289f549aceab3cad8288f63..d395f0268be566f52ee2c824b7e54442e9329d84 100644 --- a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png +++ b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d7d5c7fc8f60ad8410c2db2ecb16a0132fbfb0b2d2a78144d65250a779a7d4 -size 1485845 +oid sha256:47918d766dd10dad92cf8c17c3e029ff1069558f07c150349977580b73bf7f92 +size 1882689 diff --git a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png index 2a5c5f9c7279fba25289f549aceab3cad8288f63..76b605cd27e9f66decc7ea50bdf5a81163df4855 100644 --- a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png +++ b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64d7d5c7fc8f60ad8410c2db2ecb16a0132fbfb0b2d2a78144d65250a779a7d4 -size 1485845 +oid sha256:176bf6d534ccc56b094448a98fa24cc76b2cf0bb34a50c57d98dc6d9ace5f46c +size 1065100 diff --git a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png index 90c40ceaa7b6c2939f7294dd4d7aa02aaba69518..8c1e44a64bddd0018b07c18f28150205057eb371 100644 --- a/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png +++ b/images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3f7e2dfa16f9f4ebe71704fa5a47429b582c373c6d87ae1944f091549757d8f -size 1690418 +oid sha256:5794903ba039a9847bb3db2f25a629a6c2383e635edc00af9c8beddc3609a6d6 +size 1920790 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png index 69140719f4cc67109b72e4f7856e395e14d7345f..68f8c4c7432be57bcd1ab351dcb8bde3acfc57e3 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b30a02f03ddeb33b7ee4608c633e448227cff52965d49b3e4d6c61f0b1e163fc -size 1073537 +oid sha256:5dbb61cddae423811c5575d05717569e71479158fa20a664d58c17f100429bc6 +size 860353 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png index 4a8fa6749127ddb9b521e0687fa499f0b76f837c..86175ae8c3ab3477c9b92157a9b5b7836be38730 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81a199f77e5338800b6d09301f813f91a39f43a7ca053cb781dee881f01d1a41 -size 1092935 +oid sha256:07e10f98dc960caf5c2270d516b99a2f1245a0ff0672f3c65c501ed1899c06f8 +size 563285 diff --git 
a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png index 3cf2900e4731a5777d6f18109d977ca88ad8dada..e705154b4f8bd30d897092d5139a1113068619f9 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93eb21eb0b0b7e838d17634d1080dbfb8e6974324032ea8d272c0e7a586854fc -size 249330 +oid sha256:30dd2b65669b5f5fe6763dbebc70d8f1f40d7a3a0293a2b7a52ba360b4a50dd6 +size 247741 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png index d6d86a3c1ed92d3e13df18154e5f0cab7deb4213..0aff5814240ba031bd525ecefc7b6cc02f7390bb 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94c6030fac8830b9679f58be0232668432e1dd54969a23cc246f0ceb10f1c2c0 -size 1013727 +oid sha256:45940a45b8580834eb1532ce7ab68c1751360689524bc23e751832e2fb807ef7 +size 1593381 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png index 22fa940c7d16ed2aa6980d9ce5b51bdde334eb4c..8e558e0f9db313cc0ed673400d7ffeba09297ca5 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f2e63e455f674195d33088a302b539954eddcda49fc7a5376048ddfa638ba8b -size 1075534 +oid sha256:c09af1e554fe789de9e030a630317c3a1487893f7056397ed5834949316051d2 +size 1755203 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png index 8261e4b846e9f0a596aca4451f57321f6ba7b2cc..e61660167017ee145e68bc5049875bfa441eecf8 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a8388b5d1bf692de4b5e28e4142c2abd9d1ca349b1e60cc149b0228cc9f7e558 -size 980492 +oid sha256:9a7101c5ca345baa47fdb32f81641c857c6b189dcc2be10cc0373749a2e458fb +size 1640232 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png index 0500c24f149db2c5dbe42b03efd9426728e031f6..dbf8dc773299022e6ff4463f5f02871e9a811a52 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fd8a4f07ef26ca74e61a8f0d05af2dda7f3be736a6af5cdd889c2627e48633a -size 845549 +oid sha256:951cf6c9fc31bb93064bb22c723479cf38063d431a28f13932c8d24b6843dd7e +size 476582 diff --git 
a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png index 79ac59d401cc8aa1d43e2bebced606f244e6e856..2ae979024336f575a200f91709993fb8109af4ac 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f21d46a348255654e5e0c27ed505be6932808d66f12660b2e697b8a939060858 -size 1668443 +oid sha256:397c4de35afd4e2d78759803bad121e5e98b10bc0f8e0ae257e2bc231b3f8d1a +size 1308463 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png index ed8f0c0bba967fe606bfbb346dff8d6482a2f035..56a1b154b73ea382177a8156b58cfe353be7b1b0 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f67115e3c070922dce325b03bb0e61300686a844bd5b79ff84a046e5880bfef -size 1731855 +oid sha256:4e4bdeacf7988d456e6968efb67c34652c3b00063d0929c0a2112ab7d8782eef +size 1636770 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png index 0de9c80dd0b6aaede7031f27cca735618548cec4..56acbca2e841ee93037812fc33e5a9a6a135b3c2 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96c8e99e98eea280950967d3cf063079976099eb0048f82ff8b123b2884ceb90 -size 265419 +oid sha256:61771aefb65f509942d032094e1244d080218963850e662307df66f3fd2a1a5e +size 260390 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png index 6192521dabfb7c3f3e03a2005e56a0d19c7f22fe..c09f0988da06d80ec8653417c0e7057423d484cd 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:745484fbea2d058f879fd3b1349601e27beea769a4ca68cc2b4579a57caef6fe -size 1076282 +oid sha256:76ceab69d06cdd0d05e2c9db7d5b57ba73b75e252ccc8043cd41ea649b9bdc06 +size 1742097 diff --git a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png index 2620356df35d4b2631f86987de642a9ab2607b18..7c4eeb7baebc63cadda9ac0dc431bc9844136eb3 100644 --- a/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png +++ b/images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:498fdad9d91721a3fe3142326363fed8d2e00574c6b6e485afef50d658ff73e6 -size 1265274 +oid sha256:9f131add3a76cb1f7397f2f5cedd5799fbcf2528bdb6d21555e47dd5cc67dfc9 +size 1056759 diff --git 
a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png index 2219a2407b3dd5b1d240dcbb7a558ceb9a70cb7f..46a5a71d2f32a6a46204523c0636a46500aac91e 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96873092523c702bab83dc12164394cc8548dcd94b5b86b9a56e124eb8c9c4d3 -size 559964 +oid sha256:253750539ff9e65205e3e52e913e91ba7445f4a3b6a1336f61f2fca177aee87b +size 1038575 diff --git a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png index f5a16a66baf288de7e364453107514567470eb7a..52257693aa4483e064d47a8a6cfdc2a082301df1 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d17ba7698ee12f673f6cda78a3e2125f59a170c64f4cad2ecf2c44443b1d90d -size 919249 +oid sha256:4942699d0c518b25e38c7637aa4a2f615f38c8fb5e178b249d09cf4a88a02768 +size 1614680 diff --git a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png index 8989b1a42c176eab36d17aa093160cb488037cc4..14cb6171972ba2a60aff303ff6af87a228eabe5c 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d071a9aab27fc6c555e10bf3c1304026a1ee4de293de6ac23146759f55e81ae -size 1057012 +oid sha256:9fb536d045913a7660230bba18ec01248757c46b6869756b16f3293dbeef8c7a +size 1835095 diff --git a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png index 9b71f12c97e2cec147d2a8f42d8fea509313f8b9..615024f3c35432e2ca9ea5abf4ccb41bddc8e75c 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9df96fd3a3f39bc1cd749bdd6ab876193ef8d355cd93807d6db87e0f43506332 -size 543954 +oid sha256:6d4463d1f3f4c53a3f3681af899be325ba463c6bf83c4c4dd368d34684adffd7 +size 1140448 diff --git a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png index ae6929f4bbd5e63be95bd36ab5d87a1739b94d93..74009d9bb9db8251b758478c384688ddaf3e9552 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07822a38120425cf6ea04cdf642335dd8276f1bca69ff432be98216bae828cee -size 560831 +oid sha256:63eb1c18371d066df256ddea682ed5dff54ad5b1b2b2595fd99e2f517f64936f +size 854391 diff --git 
a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png index 7ec438fcb8705d7fedcec7c59af66af1abcfeb54..d7089be25aa50d12e1f9452ccef237cb569cd752 100644 --- a/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png +++ b/images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef95d3794c41ad3c59efa22f3266d856b52ffaedb59888aaf585aa9044beb67d -size 1302445 +oid sha256:904c2c7ff0ff84e8cb3c336d335159972b82112cdc69fb1d37a2afe7826ad201 +size 1548673 diff --git a/images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png b/images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..7d3919ff6c4fe87ea8f025e0afdb226cbe89bbb2 100644 --- a/images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png +++ b/images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:af2485b81ce0ca9352c743c8ac670053da82956654f50728c6a7978206490845 +size 1394438 diff --git a/images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png b/images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png index 6e15118cbdbe905be4704446fc9419f1d3a896a5..0bb1952aac6107c3ced3818ab02cbc80432e7ef5 100644 --- a/images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png +++ b/images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1a63533458ef831309037a4a3c9ed4411d8ae7f2d64cfb6f33c111f82d61c4e -size 1311257 +oid sha256:c43fd0cfebe8e994b9186ee0e565e13a4766820a6865ba20b925beb029e0a3a9 +size 1174616 diff --git a/images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png b/images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..24e5770e0e8eb0f15cf3675262a2563d52cd1442 100644 --- a/images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png +++ b/images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:7a7a6176b5dc7ea1196c7244ba4018ea21799cd3d82bdfeb4111f525bdc9f5c2 +size 1389188 diff --git a/images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png b/images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png index 0cf9d6db77bf33af09383943c10770e9d63a8e44..07ceaad8a2f22cdd2dc07f35561f994385c98c07 100644 --- a/images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png +++ b/images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e88fe414378a4acc1d18ea9a857b4ec98be8b4fd68082441a2c415611c38a2 -size 1563192 +oid sha256:d6198af0d58bcc48940b10d4bb2a9534f20010a35173f2ca1c0d57a2cf5d8786 +size 1535421 diff --git 
a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png index 4aecfc86844282f065ad0121cf93b248de6deba5..bc6bb2a873873ece874b1d356f5d8f608fa65a26 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22ad730d701a568b57f8b8165c68ce4bb7dd9a593206b59ab37a6d4f1464551f -size 773426 +oid sha256:8d37a40aba252bdb1d95ad1f5a14f5ebdcfce378e645b6b27947a0557878ff11 +size 757404 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png index 1453a8f6a6128e3cd13bf07b18e2d15fc49c8e47..bf672aec24d51f2f2ff0b90987c03aa4d4a3f99c 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbcaa8dec8eb09f1d8c2111be349aeb0ff1b3c2162db620fb1fdb2a9f0c5eb40 -size 875036 +oid sha256:d07720c6d81794bb60dc945f6a982b225d2f055204eb8949e64fd8041fc06481 +size 890815 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png index 3991ba22a239a847e9e32d133e0f578e1d3cbc84..197cedcf602bed1ab27b4e015b73b0a712ae7b94 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7f87efe634923a5a853ef007ba599c6e46a8a10c3f9245e0153b1a59e649663 -size 1595128 +oid sha256:dbba34c79c3c4a7971bdb34c6cf2df3edd6b3a816fb917f62b44a0a7d80a543d +size 1179621 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png index 8e8bd9a6772a35c168c4051fe597066bd686b871..0d054b990fa3c430d30291058dccb0a86d4fbcfe 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66062de02c593a94e2d0ed0977c3ccb53ce9452fe7013edc2a122714ffd6ee48 -size 668906 +oid sha256:10e64835391e8faebc967eb46a9c49692011f6cf3dea79c09af7905aa4f29ae0 +size 941568 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png index e458f7d20fdedb6e1cd8df47fe02b54b9840696c..4b0c0082fd74573039857064c4d658283cd2d06d 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49561072153b8ff680fe6e6eea95b62b8f5dc048b8c9b961f367d9faf6845fff -size 1175347 +oid sha256:0568dbc5f72ecb460f9d368ab23076cdc2b83e033d1801e5e7ac5879fe25b65a +size 1901438 diff --git 
a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png index 5ab6fdceca62010512b6c81fa84cefb1fa5527e7..45865b11e9aea72ab25d92f1df7d73c8d1fbccb8 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:286c36c5ff9269a8a9f3c41f1dbe387f2e10be0fd03a79ab375bcbc57c698a64 -size 1201834 +oid sha256:d4070dcdaa00698c58b3e804cc3aa70e42807f62ee8563fce6e440f910e66f7c +size 1176988 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png index ea72b8cba70001848970c770e7ebc182edc3acff..4299bd1fdd1a08dc48a51f5530e1c889c5057534 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e4a4f0cbc43c6fb97680bc7b164f870593806aa1a8f8ac6d11e351378de9909 -size 1269522 +oid sha256:a88852fdace766cbcc6e4f4fce2690e64a9debd694e6e87005fb3ff13ca7a4ad +size 881260 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png index 156af25d1cfdab7839b04eb71b53d18bc999eff0..0929830e2023f0e32ed67281be8cea82205eada0 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5a5fdd9a52cfc45347f3a1567fd81f0fe35e6e35e8e5612fb8575e0c97be218 -size 772928 +oid sha256:54426e03597bde4f9956f939fc68975d58f275dd96f375506429af2df7e422bd +size 746772 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png index f9d6004b9936d3ad50423068bccdcd351cc994a9..4fc84100bd0749390bc3390a626a6896aac033e8 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc16ba59c27f5fb510fc2ebdb5970704dd898b43efdd1f712cd535ffa4b801de -size 1435536 +oid sha256:fb2fdd42e0fe46b0cd94ba6a48b28a0027800b548b61c915393a6e89f791c850 +size 1211512 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png index f0814532551e9a44404b8a417809efaec0e1a2e3..260c371e788a402202ca031bdfa1b88eca47a228 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69fab4c0fb00bb678e5f32514c22790c8c35f354d831c46f07dbf46ca4f0b8da -size 1575101 +oid sha256:a3fbc69ebf604f0905b26e14d235c6fd479c71902facd709f80a4ed29d517d22 +size 1591373 diff --git 
a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png index bced92860ecca89399a2b91d00455d56a16afe45..1d72be2f8429ed3b071afb5b651251176f27df35 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b019f54b9f152dc7fe3e5cdd76ad16cf37dc9a25a0c7a92e7dc4a6172134999 -size 1259013 +oid sha256:c2d4c317ef80b22594c01ffcdcc1779680f9f9309aea7be3a493cc717d413c32 +size 994099 diff --git a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png index 85aeec674c1df9ad1882a3ddf7f573fdaf70cf34..7e59bd6b9e5f1db91e1335fce6777a98e79e0c2e 100644 --- a/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png +++ b/images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03722c65ff683fc156539614d6f95659bfcdfd1498a7d8c2f2500689b4210523 -size 1515387 +oid sha256:944728d0c681ea2bf46729ba766818a1a197c911750be396cae3eba6d5d7aeec +size 1728372 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png index 687cc92c762e2fc68305b5fa8aec87c75072ccb4..8e6aa867e862c645c1f91a0158144cb99aa01cf6 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b3207a4fe439e2376355b4bff971beb973ede4db91e229237a2c80bb257b7d1 -size 292828 +oid sha256:fb837ee39a28fd44d834a7ba66f1a3a5247560d10719625ee1477b62c96fd1f4 +size 465957 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png index 4324be6b5db51b09a27fea34758f9fdb1a2ce4d8..639884f28b44d335b88d117917524cb062101f57 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d9982e5518a4ff12f76bfec118e3485a0197d02a94404ae622d6bfa8b4be8d8 -size 203740 +oid sha256:42b9418a78a935518ca0793e589ddf4c1538033fb22aeb029b18f2b2e4328947 +size 206973 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png index 825aabb7db41a9b55397e3dbc8aef7917da29dad..9811a8ea6c5d1fba1804aea64a541fab09a64658 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15738b9065cb3f49e5e2c9b31015399a2118cf5b228bc1a8e5171a7190be0e49 -size 1090519 +oid sha256:b8bf985d6f53194b8b7e4b54b3b2d3e32a3c6704947e8eb0e190e4b1b0e5bcda +size 811971 diff --git 
a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png index f10665d3e7150cfbe2405e18fa6ee80d3fa51d09..d51fc480466473453a69e778582dcd2315f6f0fa 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5280a00cbbc4e7813c9831e6a003913ff7a0833957f103c23246b900fb25aae8 -size 990510 +oid sha256:5bca04c16899b9ba18e0cdfecf3fb812832a6e8a6bcda1b03ab6ef6454ef9e59 +size 611565 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png index 687cc92c762e2fc68305b5fa8aec87c75072ccb4..0aaa866af77471bc1ae87476562ad02c4359ed93 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b3207a4fe439e2376355b4bff971beb973ede4db91e229237a2c80bb257b7d1 -size 292828 +oid sha256:e543de555f8a4165b8c889c6a8742be62bec0b5977baec13df7cfac2102a88ef +size 623920 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png index ff651ed9e2f739d89fd8928177950c015d9b9657..87a18c950af73b2d0ed274d5a609a843f793e3f1 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8140e236b8ca12da76c66eff212a1225fb292be5602907e0da11bd8b7434d94 -size 184696 +oid sha256:cd743abf994e732eff36492fc25342960c18904fdba512edf47f47a243e4e468 +size 526396 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png index 7e0150fda80d32ca0637488ee0e8dd1bb93d27ef..198f8d2dcea5934223937a6fb044a870ff04a9a0 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c007b0c89d49ef736e347a873bf18947dc628977a12dbb5309506112b6712759 -size 760366 +oid sha256:1ebde095773218149bb51feba9bb3ad26d34448a8038629d652c67cad6278a7b +size 653067 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png index a6d76937c710a2e79d8e97c6b66caef64849394a..9b6f438a0033e2e88cfa1992910320d8de57c8bf 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e20df27921ce26d83730d28b34d495c9dc2693d5d17bd72bf1709d85b65c7f0 -size 256565 +oid sha256:643d3dc4e9fdd2ca2eab00fb8b3a23337b19e89bd6f894dee9f557c7643149dc +size 259521 diff --git 
a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png index f3fbe4cc7eeb0e6bec305a22fcf23b96f643accb..680c8660fbec35ff2a8ec5b214e1e8055af2dce9 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8e0ded5fdce6973981808d1fe3ab79f3de7cf01d141d9360fda7cf33ec3b888 -size 483315 +oid sha256:cbe98a27e3e376d7d08990b2a420c70fdcd3e7758105d332e951974ad33292b5 +size 391031 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png index 81c672228c0aadb91a6cc4fa3f46ebd5bc46e64c..c2a05d719d14eff03d69d0516e656ab0911ae189 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41826f9c780df930652e8c928e45935f06b1c9b9851e00ff1d6138abc221538d -size 234061 +oid sha256:7db621b87092d93453be61a89f8422894bcfbe9ac30150cfc3517f061651b4d5 +size 238882 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png index 7ac2ddd087a4ac2a64f223c1f7a14d9bd73736b1..4e57fa71434b842fe24b2d96426bf8f45100b11d 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38f2e96f4437b9f8a4ba8e736cf299fef4cb35786073341f4b60473c69ac457a -size 209627 +oid sha256:c8e4dece8639ead3674c5885c14a718f741493ed5deb627f6dfe4a6180133b15 +size 89462 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png index 50a749c734aac468b0bd50a337c3ddc5fa205b62..6aa9a60df70cd0f1ef02eab3a0e9c94bdcba48c3 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9be16742076a7282e5cf24bb962fb0809aa2b7ba04acd92e730e1a9b0e39ab3a -size 738165 +oid sha256:d88ce3cfe85bc557f7bbdcd34b04735819d188a0b58d1ad813b1b2c4f079ce99 +size 705107 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png index 2fd45a0e510113efda8b7d3a9537dd8952aec4b8..ef58fb3e4710e9f9685137cd9ee088afaeef5556 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c2ac9575a9cd36cbd5f2746c00f91031e1e788dace77b3bf5a3da106a561307 -size 288555 +oid sha256:9d84eaa8d96fe873ff953ca3e439f32c0d338244d208ac852999e3065736146d +size 127492 diff --git 
a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png index 57d76d7a07dbc575cb7f2bbf7fc8f52c1dca300e..34af8fd5e9dcbe1e21e6842c88fd0d9a30956e55 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c0449cf34e22fc86ce7e6812704f8223e4beb4c27418ca02788d76eb08884a3 -size 190260 +oid sha256:73f6711e79840bd01063c62fde887f1b95f5a9bb9b5922ae768e1448751c4347 +size 191399 diff --git a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png index 825aabb7db41a9b55397e3dbc8aef7917da29dad..51d357be7f5216dfb228adf9ee5677e732b991a6 100644 --- a/images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png +++ b/images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15738b9065cb3f49e5e2c9b31015399a2118cf5b228bc1a8e5171a7190be0e49 -size 1090519 +oid sha256:75e2c8757f2d5b8980ea7b3d2e7331459a9f5eb90ca3eaf27a3b2c177cc4ca5b +size 669819 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png index 06a8bebe73ef008d7d7569296b9b127615a605f5..43a1464818cac2019840d593d460442c1303f21c 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:990481a7fd6d0ded8c03b2b68763bcd9f7442642530553f1025b46a12726d3be -size 852799 +oid sha256:d82cd152df8ff9329b4bfef15ad97d97ec37c6e95c1796e6c8ec2238334b493c +size 1033824 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png index 56547b4dda0eb352841cd54859e93288ef2e82c3..83f1740caaf010f5f1a819a2a95a276402bbe16e 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1283595c69b4f98fbd25a93abe4984e8c7ef8674710275bfb83260a4f00ad423 -size 880387 +oid sha256:d2d4abff8cfa982fe2e2a76046a175cd37d644896615da04f1a078d203851e7e +size 656660 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png index eb256e61cae5f29e3605210efa97ee1d5ffd3585..67a64dcc5bdfa21f1fd2e9f466772f1446ef2023 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30d754b25db9cc2f6041ae21ca525984488a485a289c9b4a004ddd2f9f38769a -size 471177 +oid sha256:4ca493b21853a36b07826a59b326a4c309b5bb81626e2bf14cccb648af580b91 +size 363681 diff --git 
a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png index b8aaac6233f6244123b520bd2acf23dbed57391c..e93c20bff05cb3e9585b753cf48454656731e7e2 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6788eecd5c0e182fbd1430c5a76d4c80920ff7446f7b37301aed6b86276861b1 -size 860161 +oid sha256:be2f0389e8c257f39c6e70f0b3e17d18f3f2e2f16c8765195c7c5c3efa0dd8c9 +size 968512 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png index 911662cc9b9feec32a176793d31bf2f8f4c3608d..6c2132d2a229be46e5807a43385d4d76e5290118 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac674aa553844c061d84f981d7315694ce2ff67b350edf7225dfcd5c7a289108 -size 953909 +oid sha256:dbf410e74870e4f38bec1a3b9feb860463a3989d394ac111d3cc51f01b72ab45 +size 937022 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png index cba24b276af343b8b27cc1bea548d605c2ca0b52..80ea7f512d736b7bb1ace41d23d4fc44b84213a4 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b973129d2099c6aaa95735473c20fc1bcdb161bcb8b6702dc118cb954b1b2b6 -size 900106 +oid sha256:ec1c14dc42c2dcdcec2e61d527991fc2b22e0e8c5df0085877bad0e615507975 +size 530715 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png index aa90418c803a8104fec94b3d636bc8a7b0036e02..689b3545db9f9dd61923b4fb7fa1d157e62f00b0 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ceb250ca8aa79fef858917e002c538a3ac3b8bcc18e368985b3a8148bf50b85 -size 851631 +oid sha256:d4814763cdcb18bf3a6003f6b3903546cf4c751db1131ddedff26a10b7b0ee1e +size 419632 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png index 987df6ba30ba9ef68ba5ccfaebe73c1e17fab130..e531030d20b83a76afbd0f16f8f8fb7b2b35aa81 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ec826418803614e9f8933b4010f7f9a3ffa56f2fb8f7941fc04c67ccc60c532 -size 540323 +oid sha256:5a3e8ac07488cf6583d4a8d2b13ef8747d480946c405ab19716fb265c0cb58db +size 774023 diff --git 
a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png index 73e3d5a407e3647363b4ea6deb3ecb45be16983b..9c6793251feab2c0581e711c37e8363e5b8ae340 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d6810196dac646ea431b3acf036f44c2cec32a9dfb7edcd35ef3929a7bf3243 -size 834622 +oid sha256:dd1b50b5079ff8a31cf47bcb122379f449bfadd45291ed22b4ea13da404a9fde +size 726747 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png index 856bcfd7b15510e30ee3e2dcaec8c2af43cc9df1..0dd78b345685e7c13827e3d38142aa6420e2b7bd 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70a30c2cfd62ab2652b1e6948f5ccb209e181c0c0ee738e24f0206f17c39fb6a -size 925256 +oid sha256:529d179f8b5d4e414d6b3358aca04f50ba057115725582345ce5d6f928665b99 +size 843306 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png index 145d36106f007dada9302af6a1b13861d20c51ec..5d63201556a1141bbb47ecf63ab0ba4c5e475b1e 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4cf963d4d0366d3d1ef35e5414db5628b5f7da58a994c1868a7b9e3086e457a6 -size 853390 +oid sha256:2e88ec4f9e6478d05087de083761d84b281a756c7805a48980e35db736ef356f +size 1313951 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png index 5cb39254675d0ce233c4e3e90a3e09df52322d64..d708e121e80f3006d771aa61ddb96e181f4020d9 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c019d4c1e1c48bffeb6d2f0a4e70a0471936d2e9c0da0c7c5a7ff5efc837e582 -size 792451 +oid sha256:488021292962d70440f7d81c47218c99f3c40787f061e773737accd8fa8a9d27 +size 995017 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png index dfdc2e76e80a79c10d23ef68256b93510ab2c122..054e6d357339f8e745eccbf56bcc9aa5bc8e1fdf 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e53070d17cf1ea731421e5d7ee229fa4c93738d9b470fef362a9d941611939ba -size 800586 +oid sha256:9a7130777ef33b6e6ca562bf0b97ca4fa9f7fe0abd30de986d9beb770a850f09 +size 945360 diff --git 
a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png index db675684f770d8127bec022de3d915cd2592c639..cca0c49f6159581ca0ab39e25f9f909be9b6e952 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6326455799ce9334ad2d210fd8e6842f1c1945e3d80a78a2f6940e96ccaa2e56 -size 779171 +oid sha256:3316791f92a1891137a0edecf1d4fc52912e9971ebc4a8b59fd20e90de4fb6c9 +size 1104833 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png index 9bdeefaac96b71ced9c9164a669e7c6930fffc1c..a638a230de10c47e66ba64a719f3af55f9dd9816 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b8d0d39909b90dccb3a72d7269966d0e6e431f8341ae5810577adacdec44f76 -size 818754 +oid sha256:827cb55efc5f29586f31497c765b29b8953df8bd348ce6ea620f63f844f79226 +size 839070 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png index a8be9cdafc4c8671fe7668f24f7abdbe2c911224..e77786f1ae292213765229f097c0973098799cec 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa2fde1ab3af0f4cdef736856df0e4db69f452e9c55164341f06513e2ec0d8e9 -size 775049 +oid sha256:7181bf182269cee2c5a0e90115317433ca525160780636e06b2c88e8c8faec3b +size 570317 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png index ac522c43ea377b614dd649ef62daf5448ca6204b..6141faf9dd66ca24d5bb1a9ec07361c8942b6bc8 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7521ef83c15ca8184e8419ad84cf495ba55ef997a7644890d6fc220adc74971c -size 787617 +oid sha256:5cb733b4ba64580f20d7cc521f37fc3e7cc613eaee1a5b94e54f769a0e6c2757 +size 704229 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png index 34b93368d210ec7a55dc5fbef3155ec1d4387ead..c5c62d81d4af7f0929cc3f4cc3c2793c75e30b6a 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea17cd7543b4fb7c96ebf082d75fa666e450ada2ba4f5bcc78e5e0c22729ca34 -size 845932 +oid sha256:d2ff0c64144168ed3eae9b4ac4a1b9a23b8b6e103a04dd2795f0b861f938ef1e +size 1090919 diff --git 
a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png index 275078a77d3db6d17e38d1a4766fad5325be2b4d..89134db10c3ba2ab864143ff632879e781fa2022 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e62c12fc93aee135954c155bd2068b4bbc31f4f0fe3bfa4c906c21a5be2e0f83 -size 705233 +oid sha256:b4c62ba6e0a7efa22f0bc1a811d2954b4a38b8174514752113c6e6a917654e43 +size 553047 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png index 242c4e71638f1548a15e99644f771e0bbaf528a0..b9498e9316da477f1fc23e8e526c4a51a8c5b777 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8947102d0f0f9e679f5c90f44eea1997d337af84f446e518f9af2a37efa2fcd -size 800152 +oid sha256:0bb130a3fa1b29fe3329ddf0a616b73564fbe57aa3af648a45c72d1322f33429 +size 1062501 diff --git a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png index 012ebbc1416f8c4380174e64be7777edc813d77c..e7edbf69a67147ad6d8be341777b6e9ae3df4389 100644 --- a/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png +++ b/images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:513c51c7c72a195dbe008f3e8b2bd550e412a464d97ea06e655de145d2a3afc9 -size 663215 +oid sha256:e899596e2f9966004e24ac86c11575d80c5af80ba20e362880b581a5fa5e42de +size 621934 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png index 8564a9c2597360336632d8690c6de8c3942c7921..2617909a16d98c4c371bea54073df4149cbc1349 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fae1c08ba42899a0c19a759bd23b86bded9264a30e56da0dff1cdd258ee3eaa9 -size 2825817 +oid sha256:c5e9d70bcdc9cd8e3223dd431b0b05de58d0964d61a700ad1a9fdec6d0e8684d +size 2200542 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png index 8c761d8aadc4689c3da577acaf8050ea43b86137..a18e1aa3835635d4f6d740dfd0d3c805d1c48d87 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:103d22bf0d6a8cd557294daaaf768c1f5ac71fb0b06b3c7100e0ad739475f568 -size 3406955 +oid sha256:973dda863163be8beabac14f131c53a3089671c565e3942d71a2aedc16a3c92e +size 3296521 diff --git 
a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png index 786e46cc3d7af8596be6d33bd4bb491bbdeb74f1..6b557c4bd58e17ed8a6e06b71fa27f1f0a0799e1 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97d3f68cddd3ad9326cdbb4d66bb2c78153a032594a8d4de0f81695a00cd2562 -size 2671449 +oid sha256:58ada62b217b0b95dc418bc9c529e7f3fac45f6f6a156cf69c8c05762b9639a0 +size 3280118 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png index b4f059e7593ae405263fa04cdd5f52bee1e8da6e..0d00ea391137b35b411848942c8c4feacdfd3399 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1527d17d4b2f8181fd912f26997cc59c4c398bea0394a60cd1ae565cf9483127 -size 2935989 +oid sha256:aa4a47f54dd7d19b6c3f47535eb551f5dd64b3998a185c078fd42550ab2d6388 +size 2053127 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png index 148af740674a229c1c93681b3407f63817f7a46a..ba8e73f4ee11083815ac6d285dd81599b4418cc1 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da0369624ec2ced90e3143e66f2357ac1b24be73440621d41fbe50ec0edb1600 -size 2946293 +oid sha256:b2d2af76416ddbb960deb203b4438c8e81fbe5b53dacff58dc7a2bb3817504f1 +size 3650700 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png index a15a668b1be0fd5d4c85a595f98bc10339c51aca..18550978f6f05b790e98e9df1629ab2961fca9b2 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c70bbcd8f44cbc79c6e0ff7b5547c97bc0ba7736d4751aad3acebadbce6eee4c -size 2354200 +oid sha256:73fa167ee8b09d935f85320f8a1c85ec9c646362e0df08764a768c876dd1911c +size 1594982 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png index c3d8a899734943f7b49ac00bb132b1c2c7e5dd2b..73243239bd7537d4e45cff2a7d1a6dba35638064 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8ff39b271107f4ba572b8aacb17580b48f8ae41edd4fa50804a1a4b6376d33a -size 10125292 +oid sha256:74873890997a50b1ee9e63a3a71d008271a27d5779d373e7ff8c994f3edba18f +size 1843842 diff --git 
a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png index cd6116eb1e09000a4539cb73ab20041ee9019b0f..2ebcdfa7dba5cee5f2882480b4dc56eaf5cfc29f 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5abcb59c3eeaccf9548519a703c28a211caa72f1d4346256c48227c24473b0bd -size 2459042 +oid sha256:591b16ea171c9eacad124a37499946726287f6406b09e6f9358dc51a01d9b53b +size 2453682 diff --git a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png index be2cb9c84c0d6c2e35b74ce9d2044fd1fac3c635..097f2b5daf57f0205e6394ac390ceee44731f4f2 100644 --- a/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png +++ b/images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ff29282b9299fac08e62708236bf32d93a6fbb5fc30585a1601592d3a8aee10 -size 3196717 +oid sha256:12bb127dc06b1e450be359cf1d41f379b6b7174558f27e75cab2fbb3f05b8329 +size 3513308 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png index 12e66777777b0a2051a12f4b4a20153575278376..39f21ca8cd81e9c7d723e0f1c2dc7145db1a0a00 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a04c8e2b5971f16e70a91f2f1df1a48757845dc7872b5e7b264315d5b90e88a6 -size 1437078 +oid sha256:eb341ef8c493b91a75f056b26095a80de4b4680c1da47560032c17da3c28e5f2 +size 2058928 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png index f8cac40b26194d9c88057cb16a438b809b847091..2e6ab61677792b3a9b72756de4bbe52f18b8d1f5 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:328bb72988d9864d792644d5512f4bfe0de6a6508d9d675efb3813604c5fb502 -size 1612592 +oid sha256:77c7446f712ceeebbfd0bd353d607222dd95204b4833cbbe5bf46a03ac357cd6 +size 1431589 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png index e59d2e56d36fdf44cf60fd2ee7f27ca692a55896..176727f648e7e6c8079b448877bcb2abc8e7dc57 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e8502328dc0e519553e451db7bffdcb5ce61d4626bc0629dfc30409cb3a28d3 -size 1174803 +oid sha256:a2422fa02c53f19c16e43164677a964bed01552947f75fc7a33badcc51113d6c +size 632858 diff --git 
a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png index ade1dfc4c85aee9a3d0d2c58418e02b50eb41566..b1ba88f3f381b81bab742445d378949ddc5c1cae 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0febf4389ef5bc7c5c0dde0b713b00b749630003044e0393b7dcaca6f771317a -size 2368592 +oid sha256:4b1bd21c47f21bdb12d1e657da5ed0fe9ddfc97e64fc21f0e39c08b7c8585579 +size 2902708 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png index 382281a4bfe96c49affe010e90e804f8a82cb790..00ce63bfec0fa57bf4a642bb8c59a5c74eeedfab 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2aed018f6671c310663e809ec0b442617e99d5faba5f56f771fbc31679e71d15 -size 765035 +oid sha256:33f3b9e340bc9b8d0daff9df04ea2643406da909f4f8985d08b17686ddfbce4c +size 715053 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png index d6742b47b8e0387ac364acc63c3fc7e36833b37e..ddd7fabfbf5c66c5198adde5515d2cffd7247c2d 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:879f17df10f1ca5416d15c0d54698540aa237ebc7f88e82860f5ecbc424b7202 -size 983918 +oid sha256:2a4ef11b3d3edd4a92cf8037a78d1d2c0115d65f53c0125664d587027b565f9e +size 982089 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png index 28d6217e3377ef9a249a7a7ac4fd55c558d9826c..4987290be333c19670ac5096a9a49d917632f2c7 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fa7e114aa3f659095b1db801170fc3d233e05b51108a9181bc140bf03900cb9 -size 1257169 +oid sha256:9a7879aa97e9d87450d9b891855af29ae45d0985179f896e86fbd3011d077e97 +size 1264506 diff --git a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png index b7832596218162edcedf23b71e8291fb85083f79..4f8c41671a348d52c72b6f46c4425aa3e8c740fa 100644 --- a/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png +++ b/images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09274d6af7318ccd3d858d95dd4f2ad75dfa677b86ec2ce0143ed508cded5d7b -size 1862019 +oid sha256:2feab516919565c27c97760512a92619adc880cba992f2e59507dcea985fdf7e +size 1113363 diff --git 
a/images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png index d607d4582e434b01148943563f34c7f367c82d14..f0a9beee471443019e0a511e99b6b74a5fdebaa8 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c08963417e59389def0267a13dedaae9aec9e7724faa053f2300f2e8b04bca82 -size 1211506 +oid sha256:e78e2988f11b5f86be97eeeb76545ee1e76e5c8b98f9eee1561c7b02f6e2575b +size 1686288 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..d5c42c542e11b9a212f0521bec88e85186c3ab7a 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:5d9f5acaeb8a040b832315ab9a0210da2127b20a4d46da6a6b9b79ce2cc54fe6 +size 2107962 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png index 3610a06b7baa5000d1b412aeb6fa77965dce0721..e1d4acff1c82ccd14d62c87f1f2bf32f98b0efd5 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e627decc2068aa90fa4c2303fce2390957806fcb663f72fd4649fff002e94d1f -size 1088138 +oid sha256:597c281c99ed96e91e5ee85c673982f9ca39fb2db36cb1dc52872c39274b2bd0 +size 1142303 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png index ecb55812919d23c5ab4581e71a72dfaa29de470f..b8d6a3290e8e8d0a6507a53d23ee963004dd3580 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:176a0bcdec2372c00bd4d4d34f8523fb0a53dc84f0dcb1142b681898addd3ed7 -size 2310697 +oid sha256:821d8e8ca96cb632dfc767293c10773297ea310c77ce34dbf420c5e0c7c0e858 +size 1818569 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png index 80c1a23aec7af38c14ccaeea46dba24e265238c0..98378e349952ec0318a439bf682b84a7129c1ea4 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cacd3b4007d7d3fcc23ce730df8dae65507b7d6025b765446fb0f5c76bfd3fb -size 2190951 +oid sha256:049af924a9a26a6882b4d035aeb206136385a4ce21b87b08f074c5ec6f1098b9 +size 1692349 diff --git 
a/images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png index e61648b7d21ce6f5d7cfd4ecc45a4ecb7a8ac12b..411f89afe4876e8997dfd692a16b4216abaf94b1 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1c39a1f0c963b6e66006aab0936ab85098af584a55577ed4368e39b4dea8975c -size 1952973 +oid sha256:d6c7a4ac6faa260bc57ba10a2876bcdddc193b1dfd3165fec455b8b3fee9376f +size 1292752 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..bae7729e4d2170ae685be0ce5dbbf14edb43c002 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:ec84bacfb7b8a1a904f0d5191c9aab6b8dd7bb66610254b32712282396ba241f +size 2108034 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..0c3913cb88ba2cdd3acba98aca465f87708df5ab 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:3d270889b29e846ee0b0441add6c9a4a9038a8ee834068305dcbab87f4a41a04 +size 2104316 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..4532013b591ba7ecf507e020b60d8ac34cb04662 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:bddc167d022862ee589496665a98b56346a2aa7b495b84187ecdde0ea21473de +size 2105796 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png index 95cb770c8d21e2f356ed9f1e5a0ae9ac59f8327c..f5c48bfdabf92c34fc30b9676fc001962d1edb13 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90fdec7f996de039e6c38a28fd59d031837151eac4b65b387befb44820a17952 -size 1663319 +oid sha256:dbf946c09a0dbd04876badadfab3d844796d895b5b0088be140de8fc653e35e0 +size 1655937 diff --git 
a/images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png index 5e222e5e346fccc98d1f97d6a57c6f775d1dd013..5b5ea7f7f5c1ad5bf48f277d113e22850b95c64e 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7793bf1960aa7ac59b6d2a84ed5541e4da741c05b7843eb643ec88e1e7243635 -size 1138661 +oid sha256:0254a9aaf76a294762a688db5a00723a4dfa99b04758a3f852bce30a5802ac9c +size 1090768 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png index 92882b4821e1fa600edea26f92b279d8b7375103..4f3f5ae7f8b12692ec1f154e8306ab7c24d37081 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af8f34c373891842f7b6c1b801822a6e29cad4fb31d74c67ddbaae34f7395091 -size 2183750 +oid sha256:6dc91ebe13f553764dad591e55a13dfed0dff3d32f6d13879188ace669364afd +size 1397931 diff --git a/images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png b/images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png index bd100cb49f1af7cf458eb90508c0067f02b61525..f89ffb78db7ef978f05f025a1f1bfeeabd0008ab 100644 --- a/images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png +++ b/images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d05c3e534f6e3ce6caa2f748fb38a34bbb17a1031ce6da670bcca6cea7cd8851 -size 2188211 +oid sha256:e00659ffa0c00aef549c0a1e27c0549ac119b8059d662c03c654ab7fd4a84534 +size 2254039 diff --git a/images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png b/images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png index c4c9abefd2b3b3f31dfaf5ba175af8bba55e2420..db2d5166bf6eb8bc2b8676eb4388e8970a7af4d7 100644 --- a/images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png +++ b/images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5e91db5604ed088b4014e1b9f028693d13b5870722aee12040bfb03b083ba7a -size 870623 +oid sha256:5a9f159bfc659ca4cb7614dbc1f98717e40557098f8b2bf937a3226c2619cc24 +size 1325980 diff --git a/images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png b/images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png index 9247f7b812861f84c0b4e5b373ec3df52ecd9346..16ccbad560a415b54a0481659add74b1a416935c 100644 --- a/images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png +++ b/images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4facadf0c65e3ba94d2dca08a451550d21b0a520c1c16579ae7f95d21316fff -size 736288 +oid sha256:4d98279c5cd2de15f75aab4cc7fe4f75c2459fda3769ed45bacb44a461d785d6 +size 989280 diff --git 
a/images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png b/images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png index 8cabe3efdd2d43c943082db9e639d00291d9e6c5..c449591d980a587b87dae8f5eea5be09e72ca96b 100644 --- a/images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png +++ b/images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c74e216ee91dbf0b2b4d123ae9f8238c845dafd37976dce24a13eeb776b75ed8 -size 2533984 +oid sha256:588081f97a91f34935aa8aea042a9ca85a0cf0396ffd145eb120f7bba857f54f +size 822490 diff --git a/images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png b/images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png index 95f668629f9a3ecf2d2581a5fbb49b000b56cf5c..1d957aefa807919b1f1c1e4c8bb8df1132de1662 100644 --- a/images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png +++ b/images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d79fe3aea8ad28559106b6b9436ba4eab422d6f8cda8f1c3b9738db9ce7b7593 -size 1236037 +oid sha256:53e20c5f1a0765292da3640b0c5a09e60eb0cbb2c9afabb89e8a9be65a1258f1 +size 1345777 diff --git a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png index 853aebb9c76db13e99f245b1b031e1943f5865c8..c487ce6caf800ca839fcaa168191dd261845e418 100644 --- a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png +++ b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64a724f64b9cc50110c6ede052c9d69284a155738d47d7c3d8ce89bc2b59a6a0 -size 1415546 +oid sha256:111759cc98f05d11e536a5cbfaba1b5ed8682b5e6a278ba9a821469f14dc6f04 +size 2524131 diff --git a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png index 3fd4305b2cd42ea3946ce7b9ae335e2cd315c089..e233a8f207e8bc621d48b9f2ab9ebdaab5972184 100644 --- a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png +++ b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a533751acf2575aa792ed2296f9a3f96a8264fc5a9acda40b7172066cce289a -size 2135967 +oid sha256:0f6bd12f630cb4b6b2ab43e0a2b4ee95cb16719eafc199e136e957cb9c634cf5 +size 2302872 diff --git a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png index afa9d99ed1bc07346b3cf6e4631517cff12cf851..e6bee33c5343f5346cc6d45b27c1cf9204406197 100644 --- a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png +++ b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40f912e142ffdd2e778e674da9eacc172dfa5daadc42efc404a3b70ec9a0589c -size 1747840 +oid sha256:9e27c90a8298f7ea32f609aa03b4617b4c476cd05720c43ea1067720bfe39510 +size 1225849 diff --git 
a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png index ce35e54e55aaaec4ca9bb52472e718dddb0b1083..d6922c5ece175cb284db329e670038f9bd6ff03a 100644 --- a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png +++ b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1519a788cf74475135001787f67bfad056fd23c2ba00da46b0ffa49959d910b -size 1350522 +oid sha256:5f17afc19c907801440a7628cee182b192ea04548be8ca5c95e2e85762f5c640 +size 2825320 diff --git a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png index 8d2c2fd82f4a562730cb2a7321e7d6a98e99c0b4..158e0e2b28b8ae042d5a2a2c71d8472ecc8d3d1e 100644 --- a/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png +++ b/images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b08542529399c27ec03ed9b5ae4e1766603759d6eb8f3c20d3fc7943749326e -size 2136182 +oid sha256:4ffec0631aee405150b68f54af6b719557c05273c83a5acf6844c73485efc86c +size 1835462 diff --git a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png index cff8326271dced661476d3f92392275759518619..98b1fbb3cf41bc4e6041a5b8d668c13e4d1fb192 100644 --- a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png +++ b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:016abe34c48d60792bbdad0095d4197289e75bf50049b8565559863163145d5d -size 1232569 +oid sha256:4f8ed0f1513ee6be4c9a19ffc8e1cae5db842643690145f8f6a5df7417d71724 +size 1609154 diff --git a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png index 0d4760773907796a5996ffd4017f1eff60664ab1..06a81264e9ae84f6694f466979da95f29f340a3f 100644 --- a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png +++ b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b9d68e3d9cf18873c01bd64e0099328e71a732b1847881ff17a6ad76a76bfb0 -size 1598382 +oid sha256:39973fc7f638758c50fa485c4a7b45f7ce3180c7f9f3b39d3da1b747b4b043f1 +size 1405228 diff --git a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png index 517f19928e8045b77027fe99d57af5868acfde6e..3c0616b715d2fd187f7f95f2714944f0ef4d019a 100644 --- a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png +++ b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:995eb01a7bb4ec41864b2b1f7e52df1afbb69029a39e82d0e8845e69163d9b77 -size 1586100 +oid sha256:ae91d0cb247a3c2a8ebdcc8c383bdff680b7a73b8ad9c52e03834eccd4214d23 +size 1585889 diff --git 
a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png index 14088cf59184ff4fba7fd4e416aeb308dab43dad..22a6d807a26525be123fa64cafe003fdf7ec6990 100644 --- a/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png +++ b/images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8630ebbbb2cc86e5e71fdb45a5fdf6663349ca9b0ba7958c7ab6166c08dc87d5 -size 1046228 +oid sha256:b0c3b6d4e460a6ff3fd60772ed0753c0cb97a53032efa4dcd1dd41743273ca13 +size 2037198 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png index bf8c155c92032d8fd921106dba45a4d76faa9eba..888f918bc0ac275ffa466e57daba402b4a4e50b5 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:701e5d5372ec06eba6579f9923bdb9e057eae49c3596297e07d7b850dabd9b8a -size 1237501 +oid sha256:4ac2dcb9575eca1c9b98741e68e6170a477988c5f72a8e81d451f98cc456dfab +size 774944 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png index 2cfc2d66e83e85300706fb33a3699117257f2263..702ddfb859047bc7d02ca69d082901e091730441 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ae411ec550c9495629108c4ed85ed69ea360c3920f0ce412cdf2bbec56b5ddc -size 1174330 +oid sha256:c5207e60a784b872f6c6d7c8d1a265e2c0fb2c6edd7274e4e11c6facd4b2d830 +size 1268371 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png index 298ada7e85a419f09ddf99d048a4d698bf4e5505..bad2229060ff78496340f418456a10ee72457c9d 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bd9bf851b104e934d6080618a3c1f8978acef08ed7c63a49779116727b7682f -size 1572157 +oid sha256:c8279baf95f6945fb562c6e65da9549e8f3a56269e060855448d4d84f53939fc +size 1056739 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png index d7e5d3c5dc1d6b874d8ccdd57536d71f09fa8acf..ed3acca5f3fdeefe956b346c72116178c256056f 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80ecacfeca489a97c40478a6c62f479e9b40250e3e1b7561ae790af129cf6634 -size 1707538 +oid sha256:28a9d8f5a9801d1081393ac551448135e7a538cafcaf1711f4248b1894cff46d +size 1753301 diff --git 
a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png index 8eb06d0e3af858c63da4f7f35d0f29f06a38dc52..8cc8fa6520f7650c392842d01af5cf016fa83b4d 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:375eebc921cb1e860d1baceabd798d8d957579f439598572e029d57b00227a8c -size 1259879 +oid sha256:e725b8622b46ddfa0a68e82718c14d13f2b4cc0e1056cffaeeba10741bdcf35d +size 908701 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png index dfffd131cdf3f612af525fe6b0e34ecbf4f968fe..ede3889e42ed34a25f7e970ee203172c455a8302 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ca55d08a8481b8c7ee8fc1d10a517ba0b3cc269c2d1ed5fec8cc24c32013edf -size 1496289 +oid sha256:f4cd362ca13f68d70dcc68996e7b1a589a9e012e3453a39d0e89dd97408476fc +size 666237 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png index 9e477d892e1b9480165d42d48a6f599db3f1cf25..74c2b1a34601bda59d3b85490fcf3659543739cd 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4be63b554f851de4115bfbb8e092fb1ee1c298988db7d8383801ebcb4c31f327 -size 1774576 +oid sha256:7dfd262911a6b351638af63f91cc37c4774028ed1205f9faf123e88c12d0c8ad +size 1717446 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png index 4968b46b23fd6697cb84f508a2a0655dec40509b..0cf9f9ba08d54ace243c8445653f80957472269a 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddc3f5dbf6472c3f2be023555b908283aaf6147efddcdc049a5ec35096176e57 -size 1183926 +oid sha256:2a12d5e3471e15d560c4701824f1c8a103ee72c5a5761fafdc7ae66d4a37ecfa +size 1041870 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png index 0a2ff509c5a440d4a46c59dc7c48f08cb6a8a3bf..c17c22fcf78c9badfe83a5ad153afe8bf80153ca 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff464ace36effaee21dcfde1b69f394675f26f6a39aeaa28e9ee0b6673fc3590 -size 1259521 +oid sha256:27ee8d1516ea7c40497fd6033087a5b506ea8fb2c7887a4b5366316d8bc696df +size 890145 diff --git 
a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png index 7414cc72806147ee8ce9daa0d0371d093a0255e8..190d6f86a28d04c4caa83ae60359503e59ddd486 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e225081e237c35226ba5ba01784d5e587416e0046af99bf4eb212fc27313cd4 -size 1847164 +oid sha256:651803c92b83d39f8519ec8db012353d2a877cfd59bd6b426ebae5a1e717704a +size 1661783 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png index f88d3c20f7a0a631ae389ae841ec3649a98b75e2..9f97e5bcb778bd615afdde018e9e73c9fbda87c8 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0b2287ece66ecd056f8be1adebc2347eb06c1aa6d6af047e0bc53578de11841 -size 1676202 +oid sha256:eacef46234e3c53dc20195ab885ed1df21b6e04600a69a9ffcb4da79df6a682a +size 1584887 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png index 02ba4d4722ef7abc86079861f720cc453bb2f48d..9fd8a9cc12d8a2a1ed87ad6e0ae456a2cfda38db 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af2bb341a3570d432dbd01930dcbf71def4fd7fe37e50d32e69f1994f3ce7f46 -size 1316556 +oid sha256:958efb78f3ab13049b2d3843d66250750f849a26b79c88996ca71e5499b00613 +size 1079050 diff --git a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png index ece85d29e954d7700b43bc54204553961961daca..062143b8be45952f05bd600c1bb4cf84368f999a 100644 --- a/images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png +++ b/images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7303055dcce71ed331a75484902934bb190b95487204eacad2cd1154d64247cc -size 1184265 +oid sha256:5444d649dc9adfe9359e36e9647bbc2403f83e70d2a0eaf483c198dceb66aff6 +size 1227573 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png index 2bc8828300a18669b9342eff68d07ca9583a1327..1f6eebe2c9df66a83f6600f4bf8f83a621027df8 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d9a7b457eac39c94520f295bac44a5ef8a4cc6d98f0b7122ce521ba7e96c9ed -size 837643 +oid sha256:3c42af34d6ada96b65258aafc32f4a701c499c3e1371a97cbf6ba9051279ab0b +size 1190365 diff --git 
a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png index d27d40ff4c84344805fb45118857e6dbedd77e38..3c9ed78035338df80b95d6e5e64c088b176823b4 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:227f988948bc3428c276f87fd9b5adf831e7f481de4d60d3c26335ccc85093ef -size 1131763 +oid sha256:18a0fdd0b470420710f569b212902573095b754352826921caa15dc5a34a6a88 +size 1405471 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png index d5ec3a5abb64a711b08cbc87c580d604f25d0919..82d10867b577ed311a3d6267071477c539146b35 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:952e8a1bf2bb9a51198b82f09c33adbb7a0bdaa186830aa550332fb2284c988e -size 1655906 +oid sha256:947939d29e4194bd568265452a081c11b81caab64429ef92463c686b5c2462ab +size 1971449 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png index ab8e46cdad5d73b26d86a806e81bb3f78ad378ab..78bfb4313df9c3ba8ca40a4450a29e4f4f75794c 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1269c9d7d20335035bac5e209be0c38babb6ca7ddeab09382c5aa0aa6fee4b02 -size 1215330 +oid sha256:04603f2ce20962800aa9581c9902c081f4c91f78abd41cd2838d686a2e9b8cb9 +size 1809639 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png index 3d04c34caf34e4d0761c830168ff13906b9036b2..33fe32434f6e558e57ccc4c91f7efbb6faed4e60 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eece7f0ca2aaa7fdaf0332f15f847def4b532bc6bb0a735d2688c6551caf4ba6 -size 1038556 +oid sha256:d8ff72b20ef4830ac2cb46fea87195219969ac720462a19e156e28cb2d995739 +size 1386529 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png index 90b539b3f78065cd4a1a93f422708ef1f28dac60..f0fe70dbb796f9d8bf558a178db66e1bfb3ee1dd 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae4d379b8b5044098fc0215ec658e7f30d0402daf0f6a4588b2023584202995a -size 1116737 +oid sha256:050dd45cd562880ee7e83c043e755cc31ae62ff2f06abcd1c4bb873c9408f6d1 +size 1898350 diff --git 
a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png index ee238ca6e396d10edce62c89e5922897b423b117..91c80faeb5f9f9aba1effde24ff6a5ea6d297a4b 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7245fa62453963e51433525bddedf4aceae473aac55e19b29ae6bf742782f0f -size 1022931 +oid sha256:98b8bf26a34210da1349c3aae4de030396b7c31ad5ab729b6797c78adb127590 +size 1605329 diff --git a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png index 97a0e12fc734269b70b3f74d0c26f00ac3e9a305..6a2373d9c8430d63f087faac6496c9ea002e483c 100644 --- a/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png +++ b/images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5133e5abcc2dc9fdd2511ccce70b0727fe227b5a925f80c1d9e99253854d6243 -size 1129790 +oid sha256:2c5cd782f3d1a42b6fa815a80441ff110e711b1ffced37fa7f4b3fc877377981 +size 1531324 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png index bb363e938525ff4dc297a9ee8754e2aabe35fdd9..6699d6e7ce1b9e28d254fd65773286bf0a10e6dd 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6df89f87fe8aaa6fd77705a050632e83f893aa387cb3e32b0c7a91eb20ee1149 -size 1219110 +oid sha256:cd5d7e9c04d2e259e2532a55f60439182fb9d75a917beb40496e2584bc170cde +size 1133755 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png index 4e71496bd03759a0f99c38eaa7c969d475bd0eeb..9f36b15c3dececdc24cbab8e62b9dca3fce28d88 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87fa5194514f516ba1b1e97d18b5da26969ecba10e869824cff3a095764c6521 -size 948269 +oid sha256:ef1eb20ad1381143853ebd98ec02c7a3b372abdb90a37cd7c129394280ad96c8 +size 1559487 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png index 6254e52075b09a335bdc972b2de875f9ac1f48e9..572da57079d0906d1c7351e8ae346c023a64bd5e 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98637905f5809fb098f8e6b23cd26490ba2092d1cb33f4bcb82fbb3f1658d244 -size 1219329 +oid sha256:b34b7dc4b6898317b20c0bd7f37bd3f5ececff558cd710d876c0890877d97c13 +size 1224761 diff --git 
a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png index 3631e5fe33a304d069c5f15a1a5356533d57c703..f94c8fccc9357f19f338b1e204f11d19b7d424cc 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdfae11fb078cb79c4201c106de8e654b64b9d420256544bf768cac63f390359 -size 995115 +oid sha256:7f9951cd1b1b67d462f0631fb906e5b6ba5b2fb2c36160e02784573f30bd32af +size 1277879 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png index 423381018f5a8056efd81fd18a39709adc2caa1e..c06de0eb075c46f144489fb5be4b44772c2f524b 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f46588a52e9a16aac1254daf23d7384cbb927b5d465b21ee2e5c79982e35807 -size 1457697 +oid sha256:8ad6cc5b10541d5c8825f6f8c973586dae035cd9e1aae48c455d49952f8d4d99 +size 822492 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png index b07ea2f99888ccf1e520cf3751e4f90943815669..7aefae89b8da7da3cd545ed56113f86dd5f3da8b 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b843bffae0e6076e112ccf62ecb7883d33cea346aa4e64fa6379b9c7b247b030 -size 1426867 +oid sha256:548db5c75dd4989992e666992adb0b0c20cc92c2f236bb79af4c5fc5c33ef952 +size 1531698 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png index a00db599ed971d448e79c79311f76fb910c7cd3a..9f8a899c5c218a0f8b49a4e6c372cef8ee7f835f 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b65a5f6097779159319a6a7402846abe37e6211db33ad722de9fb27066992e2 -size 1083407 +oid sha256:2ec4212161585f571707f717699f754cb15296a0fa667f78183128e9c7ef31f0 +size 1518255 diff --git a/images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png b/images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png index 2f5f66b599f4ec7bc6088f141d5136b66479257b..25f71a30437657d69821ca9960486993091346a5 100644 --- a/images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png +++ b/images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acf1599b95c46419b3daafdf06c432479a691cca30158e4712bdff1d53d9ea9a -size 2128454 +oid sha256:5d0a33efa8257d043375363cbc9094ca0dbf547f96fbcb8a01066914e632750a +size 2117736 diff --git 
a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png index 6fa06f49762497fd50c1e8532e25efdca26de9b6..cb2bd7cc3d0f460443e6fbd712c02abaee2f098f 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08791f79db3a73f43f95a8c8510d8ce8ac7dd4783c0dd91f12150d9066e0972f -size 1881848 +oid sha256:fe193ad44537c8668af5a34ee10393964f6084dd32b4b3cdf9b93cff09756132 +size 934556 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png index 698458d3c480731ea2cb73ba96d11141288fc6af..1f94f26cd848cf2f03d3f6b6a3abe10098aa8ac9 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b5b64bca5e17f61b806741bdc57fee2af7f19106e92c8714deab600213e3f17 -size 1089374 +oid sha256:091747531e214814f9414a9bc9d2994b1f5ae038a833e756c2a0ada31e065c8f +size 392961 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png index 3337dda1e6d33ad170e3816682eeaefca33b1c37..379d44851191b5eaa984560f37775b5e03ad26c8 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5bf6f539f77e382e6ffcb52244a04e7f0b64d1affdb650b195ad54e6f1dfda3 -size 1099747 +oid sha256:95fbeef6c50b4eb47dc26d3f85a1323c3568fb3744f7608d37a5c58a5b600a68 +size 207176 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png index 04cdd912faa2b31c49ee1be0670180da7dec1523..6b0b1efc977e7b7f6adfc13c6ecc2ea4f721ed64 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d87d084e74720584c2900669c08b23d94b72d50caac6e7781256aee12300c03f -size 1028977 +oid sha256:ff939d5547e8b97c7b069eb23ba63ee107a86d1c07407439d46f10f042fc1dc9 +size 227728 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png index fcae6c193bc31216a24e86087fc33a17db316e6d..34b2f362a8eff6027f8f560d2e74a381458d7e80 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81808570e3e7581e2e70d04f62ce4c9c193af7e7053d75033777e5e6eb2b7520 -size 1102641 +oid sha256:2009c6f72eaa82f98e2cd382c5af0c0be5426b024f53a94b35a3d3b7ae596711 +size 293017 diff --git 
a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png index c96b764d326df7af6ac8facd1681807ca7301e04..ce4ec65881a50a23d512b8662dc15d464785c4c3 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a4aa0d40f2199c423473c0f0c7f812836220eef5a4ff2229928709cd02fef75 -size 1087444 +oid sha256:5528a3fb1b4959253a4a374296dacf2cf0577f2bc1de8e1f1ff638688b8da43b +size 618879 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png index 0f0759f444081cb3d2cc8388e472397315f5c455..57da69be4c1a0122947fa3ba9237319f40e97d70 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80fe43617fb03c16b49b1ab87f24a1eadabbf324b355232da19219ba3bb29958 -size 1085473 +oid sha256:f85421f92d156275ab5334c1c8a55ea7aef278b074339bb42962433adca88043 +size 740767 diff --git a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png index a41357d54cd178a467e60db2f0cf10de8eb3e376..8ad6667cecb1ad3f46bacc5ff6bdf18f04de6b3a 100644 --- a/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png +++ b/images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:895952318d3ceafdc75616390ff783a4a2fd497064db0e134a25a31f458026b5 -size 1096285 +oid sha256:2ae4ab7af208c14022c59120b51c2198cda6eb15665e9aca275035287a153299 +size 151241 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png index 8c49eb544b7c199009d8b4c4c4d1aa9541f407b9..8592de8d68117627cbb001061d0bf96850babd58 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98ae5a21b2a95bb64e20f3e9c32f4061bf8439c10af264b70714cd50298cd528 -size 994464 +oid sha256:7792ef414c9a32e1e53bd76f3096072e567cd94b81a465c31806ce11a9f5fd25 +size 1309940 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png index b84a9b9e2b5752bebf15fc9171383a723e824541..6e395624f96c5af2b5d5190048d5610f6123e462 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5ad59bb9a5cebddd454bf8ab5fd48395ad484f73e6ee1c008d83da37c69ebeb -size 1034844 +oid sha256:b74ac4c2ba0c5e87cc760e71f8f97b538cdd007490a8c855278ae56d9a9b0146 +size 760813 diff --git 
a/images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png index 57c7b5d3fc55a71b2c9c00f032da6a8b63664bcc..8557528f3924834336f8bb4cfad18137d911b7b3 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:714098e05ee94cf5236851ed2a27ee6dfd5bb3632eab7fce6a2ce79c2a596fbf -size 818220 +oid sha256:cbc9cb66dfbe4cd73f4d31598004d97919c325dc02b899029e1d3067b559b7d4 +size 875407 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png index 2bfd01ef7c43b2d35fdfbd01e35f757193bd61f3..ea72552f4e39b2fc0bc45dcca9bd1a895fdb304c 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a551997a880ad8266df19c08239430eae34e6359c16a0e8d1597fb8722bcb346 -size 2068920 +oid sha256:b9e5a1881e1b35efa7c593cff1a3ba31933c8396523fe1f767b852395c0bbd9f +size 1176132 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png index ff93d30dbf212633e6902bf9e7b0c7aabb8135e3..c3d22ec91daa232ba636762cafced77eb9087ad6 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:701f2e69cbcd078977d2369192f968e9ed2dfd838332ca983dd806fa99f5e7d1 -size 946345 +oid sha256:a10ec2135c2490535364018c35b12243cf01d3009c72cb7165bc908de059f989 +size 1052592 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png index 57c7b5d3fc55a71b2c9c00f032da6a8b63664bcc..e04f8b9164afd85e23d70227bda0e3290fc978cd 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:714098e05ee94cf5236851ed2a27ee6dfd5bb3632eab7fce6a2ce79c2a596fbf -size 818220 +oid sha256:b6b8c7165d1bd9ebd8ac86162fbe56cfd6c126e533e1b3045fc818a505a6cfbc +size 1445835 diff --git a/images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png index ed8dd5db93706d80c1b3fe077e8f1d1477a69768..30b7a60f5f314953aa1172f1e0084561bf95df7d 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b046af282f3171d791aa339f7adcf74f09bbc30b90b410dc4f00e62d54e7056a -size 2064056 +oid sha256:8624e4cae465a46a2d4460272ac83180a11660d88313ce8de11069645608a115 +size 1176938 diff --git 
a/images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png b/images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png index 2e844550efd7d3dcfe04618dd6f8c247b77a5037..9314500c58804823ba85f6d5acbd1958ba906845 100644 --- a/images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png +++ b/images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd5bf7424ac60675be843baf6c9ba1ce1f6d4b4842a81024818ff85faba6b61e -size 1063230 +oid sha256:52089ffc06b8a5a759b8cd925b82504aaa6baef60d1a4efee87fc1cac310fa69 +size 989517 diff --git a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png index 9aa7b809cbdc6d92badddf6af8fb929773a51060..aa8c31f3613586107c08cb7935aad002108e7415 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6665ae60fc8d8cf8cec2a3bb2df4c28a6659874fee08000ec79732f3aef9809 -size 1445830 +oid sha256:dfa8227344258174a47fc9cf4b6c8838a2ae233c4edbdb90af9b3a63948e01b7 +size 1580056 diff --git a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png index 08d840410950573b37f4ed75874dae2f7becd42a..36e3287b5f00d06269a716457ec54b254f93a4d8 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fcdb2bef85abb9c62c9e84ff342f87b0063885cdc0ef6a6b8c639fe178d982d -size 1095797 +oid sha256:49c1db6d271e9413a5de102dfc9392d2d3d87753fba6ec584bc98174eb536676 +size 860669 diff --git a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png index a6c5125f05a254b322c1ca9eb9a174a7b886ae27..1a7bd84073e855e1edcc36751c9cd872ae7ef438 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:113d9e6aca2ed4c008a43db2eb8416c93e182d42354d16723e2a12bc4f9a611c -size 1664402 +oid sha256:33227f16995abf8eb665d95a158aa9d850c3c15ad03faf33e3fc63c0af56e703 +size 1729154 diff --git a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png index ac36ed74723ebdcc4c62e3611f027322445dd6d0..5b54ad304ac8dd95a58aea56e4cf428fb1e41bec 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f586228a342b857683a7ac99fc7996c11c571802f479fcdeeae997a8b763ac4 -size 1434876 +oid sha256:189abf87834b970e6c6025bdec70bdf1338380b7217a2f12fed57d029fdfd988 +size 1367249 diff --git 
a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png index 55377f1734ad27e2c173818f858550091a75f33f..eaba6553ba9187260631f0324e80953ceee6b3f0 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6cd80cce1a964c78a8678e08bf1d0c827c46e70296d811134747c1077b50e79 -size 1653689 +oid sha256:78c280ea3f727b467addf9fdba760114ef5e07e4c21d8ab041098025e86e85b2 +size 1441609 diff --git a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png index c4ba41bc7a6b31f90f949f3cdd80348a3c709c26..3978742a66e08bde9db8262e376c52dc16811af4 100644 --- a/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png +++ b/images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0097b97a0ea94815ed32f6ccf8adf5430a3bbc38bbb0eb6c7b458b27d5b76d1 -size 1102047 +oid sha256:c871a69fdbe831d4298e618f8b4000e5c23599a81a3074ec0dad09f66c0c35ef +size 719913 diff --git a/images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png b/images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png index 5d341796c9c5422df3e32c3fb1bb2ffd9444b9c9..777ede2aa20631a8ea72c11c8b82283069127323 100644 --- a/images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png +++ b/images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac11dcd53322f2fa2d05fc4794cfe10eededf3dca965f45d668a7293d65d4512 -size 3177735 +oid sha256:7777dbe84c7739f7da0aeb80add946f9f9b796b775ba554be808b0566c7dff88 +size 1820884 diff --git a/images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png b/images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png index 19879b331da51d65d41451c9d6b40305dd2f4fed..b6d841b4c303d37c0f3cece552d0a52e1aa02610 100644 --- a/images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png +++ b/images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d47e6b85884fd32d0b6bcd6207f397125bc1ee6a01f969911ff2c23cfac38cd1 -size 3293026 +oid sha256:aeb53feea40be1792fd2223d5a87e5b3717fbf421a940e3334478f0d6a34ab60 +size 2341635 diff --git a/images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png b/images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png index 30ace4ecb18d8a3bba3e2a97fd68284a1a3d8f41..ecdee3e27f81763cf70677819dfb8588d71fa0cd 100644 --- a/images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png +++ b/images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aecb59e0508214d26646271dafa795c8c26a907932a70338aea330d5d80098c7 -size 1655861 +oid sha256:9e738238a2200dcabcc5d6bb47114d098e4257c023376ed25873bb41b6c29309 +size 2088584 diff --git 
a/images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png b/images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png index 47bc20312b26c9644c2d7c1621cc94ecc99c4c9d..545966a1bc3aa75f885a6cbddcfe614ed01ea3f9 100644 --- a/images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png +++ b/images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d68169c28d3ac722e44e8508aab3f2d6c96bb0a7d8281ebd53cec5bf04ca44a6 -size 2430007 +oid sha256:9f5d0091d10943f837f4a413596c4bdc70725cd29eff01fa1fb83cd0c60d93c2 +size 1536498 diff --git a/images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png b/images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png index c6831e58773516c2b79793b0532eed89a2d4b22b..ef0fe9e0edef7df5d16162d51076e35c76bbd97d 100644 --- a/images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png +++ b/images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4472be7da58678705de6de7b369ae57d5b43db9f2f60732da13c16add6662807 -size 1441602 +oid sha256:6c4911f4fabae9e93361f5afde170be8b5e0182e4454ae139a50d7a05a5683f9 +size 2366088 diff --git a/images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png b/images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png index 1c4cd24780a69858afda8162546c87c17e1d9e2c..003ec45059c98dbe02092f15a164ed6d7b073899 100644 --- a/images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png +++ b/images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c41e022287d25b3bba1b680c059a747876d17b5070052c3500bb723ba2a6671c -size 1057328 +oid sha256:0d09e4353a849c73a4034cf1cd6bb352c96004d7d0a981659e65420e13a4c404 +size 1061266 diff --git a/images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png b/images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png index d246f71f8d3ac83e9c213a9731e622eccc4f6c36..a8ea791ceb0102368b3f32f006efc3ac0d8a1c57 100644 --- a/images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png +++ b/images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afc00bd263625419dc2c910ea11c8f06b3d2533682a113687059c84690dfa09d -size 528522 +oid sha256:2e29a57f1ed2629a85d3ce55b194dd69cc5552430ef924a4be7e6ffdbed00ac0 +size 659601 diff --git a/images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png b/images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png index 368684152e1f5a1e3c4f2704ad834da6d80122bf..c9402a58337f101bdd0ce2d2cf9e1104fc4206c3 100644 --- a/images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png +++ b/images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d49a25962531706f785433d062257506363ee892dff01b5e03966a50410c09b -size 630950 +oid sha256:28538aa6db4596fd28646284e86084eaa201cd656cd9ad33671b30bbd25c4a59 +size 632920 diff --git 
a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png index 188387d106384774aceb76d6d784193b4fb3650d..c9e6e93c499e7e6be51b604b8cde0c505402e5f5 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c1adff3c75e3978a35905148250ed65af73611f7ede9b37338f15e31369881e -size 575547 +oid sha256:91ef2de96e8e218a58596fe438bbfbb9c1778d7ad9e34959c578acb991376c69 +size 783965 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png index 9362bcfb3dcee0e95b973672c98e74ce5282c82e..9ba31a5340b0a0f3036424758e4747e3f94f5a05 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c5b6a46944aa2da7ef269c99d02ee50efd31eca2bd61c110572c78e1067b37d -size 679476 +oid sha256:affbfa04bfddcba56b0734ceb41c1bebc717e1cb727b84dd08820d0d47b45e4b +size 569232 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png index a17c140f1a9ed3a9b9f90b5e892733e8ce0e9950..416090454ba0173cfce70e17c2339a7170008d03 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fe6c8e5aeda2a2fd56d686de5abcfc8994b91cc5dee2b4454181fe9a4291b1e -size 400509 +oid sha256:66ec22efa5d259cb8ae44599112aaa40e0cf47a43c70647c0a2cb0d163a456a5 +size 439947 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png index e0a56a261c532797bb0f2fef8e88f438ee6a7eed..f540efffb6f3c3aad7eff6fe56dde7d7c9c037fb 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f4eb7d03472a600736f80fdb368718fc9a26f9e8910381dd70e6e0fdfddd88b -size 778889 +oid sha256:ed18308dfe49934bab74b7e44bf2c66e6478b5cc5919247cbfbdcb8da1f413d0 +size 848599 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png index 5eabda3c36b94f441b84196d14bc2d9af72ff54f..12695e32afcb37488d6389ad6f7055ad4eb90981 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19a092cc904d5c5eac124d846cbe9a4b08922cc0f667a59621ac181458a20393 -size 769341 +oid sha256:80b2e84e936b5e491f46165359686fd8428a89466d04a4b5dd0e68d6369470a0 +size 870949 diff --git 
a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png index 3eaf24cebb9cc66683683a367301e1ec710b8f5a..525f4196e3bf6042b3fe235f6f4d758a04210541 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e8d19b232a4edf022c11d7b0958ab2d3c7b38bea766593a53bdb5c46dd133a3 -size 937850 +oid sha256:4232ec04cd650a14d1964bb6fd110f01df3f49fd1e5901b5161ec22c1e73d9df +size 611770 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png index 900ea74775f266a4c89fe692c867fa21b8082009..62fa7d0d3fb124b6b88eab7fcf27e9c6f358260e 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ce126823edab9bdcb5c9ff31e70cdd41827b685081dc2e103b1aa592ebe79c2 -size 676039 +oid sha256:6c7dda3de005085f8521059963c31f83a411b0218a564bf23d8ef03ac9e2f207 +size 527794 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png index 27bdc56d539330b49a07b1aa30241364938856d6..9181a7add5f237a9fbd50051110b28fd244e87f9 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48dc78fe0e95738f5511d642c0ad570a3e92985673a2091a253483cbb97b88d7 -size 805194 +oid sha256:029e6ad718b33b791d1f8f85145bb9a912d1041aa39570deede60da3ced8588a +size 606950 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png index b9088481a335e2f574e1156a1a84c47da2d2028f..e5f4b6fb6083cc0cd90a2cea53dfe565d31b45ba 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:687d837824a0df80304b79b4b6fefcb951dbdf7169b46116a8292114ca608f85 -size 606069 +oid sha256:05faea3eb99e0aa1e13c003a9357f3af73268fb3a617c223d878b391df61661f +size 641215 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png index bee54ecd5014fc6c7cd45ed527784924e0f02fa3..c1e9243faf47c1fa3ee7a05752b788c71e7527e0 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1c3fde3a77b9621fbca5462fb075f373d55ca797d07b8e71649c7496db6d05c -size 935172 +oid sha256:263a059c6c21fe50ffb0b67b1ac0f067d09dd1f8255e0fd98cb9339301cc6e7c +size 559371 diff --git 
a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png index 981f7273573ba943954d90e73c2d38bcbe65fd67..2c15549c054388ba5b73cf9ae76e8b99357a6883 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6245bf18f757f0248f9b1c58152861d99918cc3febc0537546e93978cec6acd -size 1425078 +oid sha256:e58a38541649fb6b1047c8d9a3ec4245210ec2708026c3fec7cfb9f7cec43dd2 +size 1409862 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png index a557870c67987ffb8ac5bced6b0f9f915bed8a5b..2436d72cee97c761ece09372121f0ccdb0526200 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4b740a886ff3291bbeace86b2316517a4e64e24dd17189ccb216a16ff646f0d -size 875169 +oid sha256:a7e2d8c9ba5cd5908be11880999e17701bfbaa4a2878ccc281dd3d84504e6b9c +size 888000 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png index 70de0497929c7b2500fd9d6af8814975c41b714f..a894858175012a35c5d0a0e6a5319b2f0e4e5174 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43ea5b0e6687627de9d3309886c353361c346f38dc544eb63db9dffeba547280 -size 962135 +oid sha256:e1c0cd7639e69dc9f23617145c465f37314b6a4786211696e3fee1546f5b452f +size 963313 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png index 760c9b80d5ac8e855e553dc17e4a82dcbb36f08f..c0b6316ba5ca314f9a9450d06d1ae2db5c6d4bf0 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1492edfef2a56502b0be519724a2f4aea92a28a1760854ae0789e91544a9ca4b -size 736589 +oid sha256:992e966918df311175a4fc38128891de24267d2fb8cfdd5129692fbcb725de4a +size 805597 diff --git a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png index 516dc19757d1211cb6b60583dfaec6be765fb665..d8699caccab0edbe2c154b019cab921fe5c2259a 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aec5020e44b1def4932bb4c95b252fcd46d9b60cece52560e49e3fc5e3af265b -size 634166 +oid sha256:067c48249529a3658e872a6b6be0e5ab166ee52749abcd308f02e2ac2d25b736 +size 641237 diff --git 
a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png index 1d6072e98962f441ab05c954eb5045fafc62fb7d..717939d07014c0a8ffcf89a31ad137565edf2d94 100644 --- a/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png +++ b/images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:159fd64f8ede921e143e0f1be24e9eaf27907dd65d2745c4f280c4e866e6cdd9 -size 566546 +oid sha256:a9dd15a61fb595b2c52fb465f9c66e441a03a71cc15cb2635cd26bdf940268d0 +size 403203 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png index 2dae5d3cf858a6ca11d3efe7956a120cd7afecdd..a303e088b0bce4db57e93e92127dee6be21127a6 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9811edfb2129d9eaae3537d431623949052ba9678f56442ec8bea2ecd9d4acd -size 362556 +oid sha256:3ddc2c6b3b5f9a5491d1e492c901bfff78a5b49fd9807fdf4a63caf36a848cd6 +size 1792334 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png index ef31c3e060f06f3b51b844230564a81dfcb942c2..38073b64f913fb16105be590f43ab3ce045f3e62 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30a42b47422cbb6cceff3b4e0f191f3bacb62a338d6a87ef01001ac8d5ebb9df -size 602394 +oid sha256:fac73fcab9679f17b3398638b377a7d7d7f7069a652e2b08e431708ab9e5c491 +size 872798 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png index 280bfd3691df3dd6101fb3af1fb8a9ab7ab82324..35231d92011d335a8a92749608ee480104ca6c32 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b78e5787d8ec398f406a884639034c896a173ede1536cdd6ed69c1c858546408 -size 539083 +oid sha256:48fcd053e7549cf79da548247d4d2d30aaa8901c61f31f58dbe62d618c7c47ed +size 1598721 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png index c50ffa6701669af237f47737ce371d455d97c73d..92cab731de338324007774e3b003d98a01ed1681 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2000e3f4f32cbd83d3f3d96173b790186a1ff57c21a756a3c416e1146760ecf -size 714539 +oid sha256:9cb745d4c2cbf758145aa2fdb8aca609b6f676982d59b29b0e9dc698d215b272 +size 786094 diff --git 
a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png index fbba12b352f9d5aac2a5404698ffcb2a2d86eb96..dcd7def17c49177faa12d585233f16137d4c4e42 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a62db91462ea652e29f11ba504bc465c207c1777de584f2569d5ce8bbda4eb6b -size 660709 +oid sha256:5ed66f1c3c95a1fefb8861205233a2a9af6dd2dbd54ab913ea9870394931fd8c +size 691896 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png index b6322e442411d496fad41b7fd2f3fb7ccfacd5fb..2db38afd1f504c21375fffa99533078b07851936 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:265fdfccce98830b7ff208326e43491942ca0c23d1a18db7df2cdead9f5aeb9c -size 724487 +oid sha256:554cde64a0de834a53a03466a86abeef6b7c6c1eff0e717aaaab9abe0c9bb64c +size 861573 diff --git a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png index 029db11eb0f1646dd348d560a868730e41ae017b..c4adbc6653663316fb486370b00d037b983b61be 100644 --- a/images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png +++ b/images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6654a2a059409973ecfb8f2956b354c87b24295dbb3ae788b356441e47ab2eb4 -size 686598 +oid sha256:9fe1c8be250cab439ac1d31653327a13d1b59655a57d0eb586830305d218d6ca +size 815292 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png index 06191b0e5cca7a89da75a4edab9967a6b15f25b4..864b3711dea1f1ef1d6594aa52c5438e86f09aed 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29e99ce89ccfbfc76394af2391913a49e13f8c4a09d8f5de22a85b33897eee07 -size 746029 +oid sha256:1d66d6bb02bfd5da0e2278033b817e07c99f7b9ac083282ffc5584e8d0bed64b +size 969215 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png index 2601f55e7a624e22a1e3ff784e28b6a5f1281547..9c5b391d2dc4818771073665f92da83971a8151c 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:af8265d411f0be21767925f3de3db5bfd0b6e3138b20290aa847b86436ea2285 -size 721341 +oid sha256:bd6712aa0f5ba534ecb38dcf5540386b534fca8fc20a4ca9a4ed12ff6270bc3a +size 953880 diff --git 
a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png index de685c72ecb922a0d5062a00f460517b3ef2d9c4..460d3b9ef0a412fa61150af16629dc7de90bdfe4 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ded152f833da3498b62b49b11bbe215fa2d7aaa43caf000186a14db45737cf0f -size 680914 +oid sha256:b324a8f80954a864f84c83be9641c7babc74964d7dd8097c9dea73a607da7f23 +size 606884 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png index 28b84f563357328d3b1cd39681ad71cf1a69758d..024328e7bd0bbfa71c473bc81b3ab56d228700a3 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2588425958da484250364062d561f9f4f1affb51fd30e5a7f40ef579a0d207b -size 723852 +oid sha256:ad4ecaae34e9beb76bdb61f86638a8e718f29b0d88c3329cf99fd021c39f5b27 +size 797355 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png index 2c6e94c162e2d6d527ccc184bd107baaa64196a8..6534bbd9d2d9196cd2d15a86eb71e14d9af38d8b 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7461f9cc6b35cdf59b7f409ae010cbc64464f178478536e35d77d6f20cbe010 -size 733516 +oid sha256:59f9ff4bb1c77c2363a2cacd328c2fc606a74dd05f65d6b8e6e5c4ac0c7cf9ca +size 829737 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png index 03327c080904f53486cea44c511b907646a0d29a..0cae9784274a6b7a4b5aeec4e51101a2fc58d549 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:848fd4f64a70a5b0e7a4dd93c34476d37cb276aca28feba28c5573d4d40f7d65 -size 1689827 +oid sha256:a75ab3d51de633509bd8c51bf72e1bfe090a9928bada7715792d05b052c0bff8 +size 2093614 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png index ec194033fa884b2a6fe3560137e1726f361f855c..ef9ace23cd6c7c39f395d74f613b4fa848f3dbba 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b925fe5f2e49b4e905391c9967f1d3e6cec5308990407579089d180ea2d51304 -size 681405 +oid sha256:73e7fa0c71f8cb6ed5817e9743463f0223c5432ace21a3f2597129eac1f0a3de +size 291933 diff --git 
a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png index 863bd123260e94d4e768d9a8a9260813bd99e4db..44d717fe01cf39eb16d9747a7167ee7fa047764f 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e8b0976464964e35dadfc1bf6fb3a7a7b416d91a2df962d2973e28fe0085868 -size 787212 +oid sha256:1a168de4db9956eb93cb9952b0544adb103aa765ca008a68306486e9394f460b +size 1148799 diff --git a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png index b8df55a5bf6f83eeadd312cda7ae1a6005a683be..da20232205687cc2b5b38ffc2c8233b3ae0d9243 100644 --- a/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png +++ b/images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5b063c349aa9e78ad35d71d8ab1ee556d8bf4f0f3fae9ccf62d33315fc3d61e -size 760148 +oid sha256:66a2d76a726ccfbbedb80d94266528e562d04755d516efb6e4a369c481d52c1e +size 1102768 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png index 8d1afd4aa2ca838718acab3b35a12731e0c03334..d1f7664f65e858a30d5287c1506a011b63b19b4a 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5369e518f472b922f73885f2d18b959b4be21756211d383c8bbb8ac88fd70cb -size 1467331 +oid sha256:fa9cec4dd7b98d03b0b15589bb78e2f3f0efc229aea0d6aa2be35ab4431c0265 +size 1682293 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png index 6852ef9aeca3323fb00936f3eb03a65e55f3b37b..36931f1682f2515c03d4ccc2d9eae7daac604829 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:064e157cfe1f031ee6bcc05fafd3ea60de41659d7d75e492e9cfbba9a6ae268f -size 1367712 +oid sha256:b7074acfa14b01dc61ad18024c62786a3537524050a5ad5310617d9095a9f505 +size 1584385 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png index 33a3a25975e32711e5331f27ec02176e95a3bed3..07859bbdcd281c8d1a35ffb0ed6444b3b0ddb7dd 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:695f2df61334b9bd4ec308d80c7ecb43ca0bef4cd166719a41e3b0f58320a679 -size 1466293 +oid sha256:abbd807ac6652b634bfe452405da65d2d1d681f95d34153a7513a20300d70328 +size 1774807 diff --git 
a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png index 21136c3c7d8093441cf0c7b2450d48c362b04687..57a98fe0c7a2eb2ce0bf9ae4d4d1fa2f33464e01 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:280cb6a05c13308af63c3d6369329c37ec2f376016e9bad14c33d1f0d4678245 -size 1520195 +oid sha256:2c84ec222350862c0d7ed03673152eb4124efd5a4c566960cbc0323377a4b26c +size 1520291 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png index eb8dd5b454f5000a885332d3243840c2a0cf3615..a6946b36b5fa24cf527ed858ce681103841eaa36 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:530b1ad3e82f8d84beb42778cd807c270be358819db2ab97b64b83940232cd04 -size 1465723 +oid sha256:526c836943512316b7d8cc438cde58bbaa2cdf8fbe93b74ffd956fbfec2c03d7 +size 1529827 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png index 49412c1041aa3a739b11378ebaad3323ea43b3d4..2c4b1fe198b7f45f577c2e321c4143d05b454a21 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:972740007c24af44749a84beb1f6a372b084da365f77ecaa1ede91cbab7d0203 -size 874576 +oid sha256:d5e56ae014974f1536f558aa0f2868f2525e00e98b2367f29784fe0bf70a7f8b +size 978558 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png index 5935e7c40809351a8a8e0378f1b9c0c3defc94ae..b3f0501c75226b27481c0ef7716df26fdc9637bc 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4e618fcbf5d8aed048eee9391e1edb88fd9b25caeed720c6263d77647d63790 -size 2282707 +oid sha256:68507cd8fd569de875f6a4a05e8543b6e1b7bfbf1232237be9b87bf5facfcd8c +size 1630647 diff --git a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png index eca1af466d0a1a058328e7a0e9aed91a5db3e1c8..2f75bfe89b4503ee855b8ad77339409fe7afb81d 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cd661802ad2520aa6959427b679d4f2222b2a7fe05b6cc26e4e6d705ac91e6d -size 2199870 +oid sha256:5bce9d909923e8ea6676c453e7b7d4f56dee7f0395ed53c355bfeb83b9c0ec5f +size 1232039 diff --git 
a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png index 347dd178b9b2d6af1b4c13619abe2ab1c106c1cd..580b59e8975e4386ffffc7a6698db60e1bd70abe 100644 --- a/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png +++ b/images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:722ac12027790548687e1f4fea4c5ee530553ceedd5c808f2f48a279da8470dc -size 1346070 +oid sha256:1d1b5dcde495ceffbf37e9074ee0aaf9b240e4779899c374ce97419dd35ed12e +size 476188 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png index dbc4a800911074517b623ff07b48edf93e531e9c..b67d676602050808ba15fe99f4af12bde54eb2f0 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d146ccb1f44c153f4a20720644395b2a62304a69fb0a6bf14e8b2ec0a1846c36 -size 855530 +oid sha256:bf7cc9b859d24d7df6ab40ee12140e16989aed7cbc1315c95b4d06ac7ec63ff4 +size 922219 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png index 9042b24734291ce31b975256e0e3229fc42852ff..c41da1aa57c4d32ed95e07293a75435ed49088a3 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97b7c66e38d6e9eb6b6eaeabb3eda33752772d41f2df270635ffd8fb68f2aecc -size 786203 +oid sha256:f899cffc380f56ae64a01a7744e10068febfb73159987d78d5807f0a72374cf1 +size 967754 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png index 4fe2b0fbcc4a3d6dcd9ab7fc7a2a7c3eb6dc9fa8..a9a71bcabb6495c49f58902a8f1d15081cf2a92d 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9755a71c7b0f542c85330631cd7697b9ec7c705a699a002e82f0ee4bd46fd7d8 -size 845198 +oid sha256:fa149ccf08ef9fdddb60ec477359014296e4d279a61d7e1d265545727c7b49ec +size 929866 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png index bfec04d7f53b79876a9ba0dfea86b3f12e399b89..95b777b69edc6a9d54a9083c77caf963363607ad 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ed3c4692db18f6e564f9f759a7e8af703fb8bf9b8bc6e7245397177e328ae2a -size 267661 +oid sha256:8f357f4f3e3ad4da8544c195796e03d4d3d373d86e4793501ce2c693991d9ffc +size 267628 diff --git 
a/images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png index c107313a27ea1c645983f8b040c6a429861ac1ed..5a491ef267cc23b7bdc4e974fec92dabe2f96569 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f51506be5c5d007f5c5d558e4c9c9f3b93ad4acafffbe4ec327e482452b9832a -size 1091500 +oid sha256:858e5d75e2ac8ff68c9f53dacbdd25d7d04e427192020193dc8b80dcd81c192a +size 1216307 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png index 94fb4fd5466a2758fbd67678c8133234507d1c0a..b371e4693a10bc50e96f398fee7c86916d685063 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ff2d9730cd5af8666e4c54a280cc0fd01dc5d715173e7a8891e93cbb874a9a99 -size 3482695 +oid sha256:1a0ad0d7c2e38bf647b2e1c1ec4d00522a9ed38d81f66a837759cd1ce10a0044 +size 1681624 diff --git a/images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png b/images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png index 01c2f911c40f97f7ed54a27f8306f15590d70405..dbad72e8b9725292a6a18737a904146c7571d10e 100644 --- a/images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png +++ b/images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0731a4c52c5c7d7563fba696608251f95670c9a67548958c8f91c783dfb87ea4 -size 918326 +oid sha256:919c7ec3ef11681b4031a2ea39877136061e9068713359b39869ef8e459a159b +size 837900 diff --git a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png index 6e7a1981c070669d843d010863806594d8600edd..da5c5539a032789e74062c66745a9ac27e8cdd0a 100644 --- a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png +++ b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20400669207e9e3e73c93457a75520eefb200ece27165cb0ac06ab85040790f6 -size 1436211 +oid sha256:06831e3e72a19a72ffe8b409e6e84186ffb66290392106f836c2ce49995a60cc +size 1216793 diff --git a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png index 427a509aaca98c891de2a5133524343beacbacc2..834cb77c30d8e4cc33eaf500f7230968c6482770 100644 --- a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png +++ b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45651458a0589844181ec4b798efc74a25ad3af7dd2d5eb50c7d4b49c880b1c0 -size 1010971 +oid sha256:17f7e203140e1f9e51922291796dff10d50b520fdc599b192220937696a786f3 +size 818986 diff --git 
a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png index 2f61691ab50b99698798b19af51aa87d1d57c475..de3f8d5f7094d10b8460bde470ebd7779ca119a9 100644 --- a/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png +++ b/images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a9cc6e4bf02bd0fc2c34174bab61038e8fb477db71cc77f4c921ab2e2aa2ed3 -size 1050099 +oid sha256:47c0593f52e8acea797388206bbb6f1c151416bcb00f9ae8b2538bdfe7d1c1dd +size 1145500 diff --git a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png index e7685ca485423382d5970a6e305ced7c742dee7c..cdedd65fede1562cf020eae45bf1e9ce5830b431 100644 --- a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png +++ b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9224a339d4d7304ae749c37b1c70f8e4ac838633bc605019840ba01307b8269 -size 619535 +oid sha256:77399314bc23c5360938488fda28df412bf9ea4cd460eaba0d46282bf34bb8ce +size 1094582 diff --git a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png index cb711fbf4b2d2fadf88af387f5740278ebb2358f..bd70c28554a1de24e09597880880a4715d57a416 100644 --- a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png +++ b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62b5bddbc28b031cb8cd2af02f4c4e71dde0953e24257bb2b5125b92ae85e816 -size 447068 +oid sha256:ccdf6f1598d7c765541e40af423f0441435dc93029a01fc0875ab29a117c064a +size 503485 diff --git a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png index 945b2397492bcccbd133d74717ad2675a31bd37a..6598cbecc2927cd75a695317c2b686670cfde2ce 100644 --- a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png +++ b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:962b6e8a4a2574ac6438a86f701f61dce6fdef9535f5360d9a34a11a5778c51d -size 528483 +oid sha256:1b56aa4ab73ed99106e38e83cc6cc2e776d9a62de5544bf34948da0befc2ca2e +size 873223 diff --git a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png index 1c83fac0a63e20b71531ff80898cc609bac77a4d..5293bed75b59ca66a23dbec64a7a35ec4f9e0e31 100644 --- a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png +++ b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8f2cd488b841123f5726eefe98f47b2c11ac19ca8f7c9402a2a17c14d69af94 -size 619515 +oid sha256:c7088a3dae76c49e601f9660da595b1b8f0d3e7a3f107a881c28cd4a14d97219 +size 628925 diff --git 
a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png index 0006d03f231fff13cc10aeab95d081f5b6838084..c059e3da55a63a4e744b450289a61427cb6121b9 100644 --- a/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png +++ b/images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aad8b5f9b9d72747cb19bc7054bba30897d92c66d63b90662b74fee5c125788b -size 511284 +oid sha256:13d56250a8bdac0b22c1d56d4de1f191e3aeb1325f92c9fc949cf3c000155f68 +size 567406 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png index 0d4042e961d3f7e67586adaa4611e24d4e41037b..4ad7148a82e75ab99c06077df75282bc1f9b441d 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f840b14fee7cda4456bec5c075ab7fc21d351358357e60d9d6cd064fa7e535f3 -size 1088152 +oid sha256:dd4ce49ca5206fef31afe912bb3773a592235f92d0c196bde33a77fefc951cbe +size 1594292 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png index 59682919aef4a8a6713949bda21701f73555c947..d529b51782baebd7076fd479651319b2d39745e6 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41d41ec8fda4e27e025e0c7eb5e47bca431b28c8f221ce518807579e52b6b1fc -size 593132 +oid sha256:b427d6f0194f61508fdb27b025875a38f41ad5dbe357af61cbb8eac4eeb3bd54 +size 593388 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png index 65088f48f00e71c92f2963d2b984883160efca83..bf00d68e7d42e4b2f4672511872918861e7ab718 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d69ad059de2789bd79c099fe9ea3204c864e0e876f365ca57e2b30782c2540b1 -size 1092396 +oid sha256:3f2cb4cc88bec3169a958be32464356bc763355f52b424db5556be823e55dbb4 +size 915240 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png index 3435a0217a40e2388436cd2c34480e1407a45bad..5a0f63b827c1242bba03ad183190af2674691041 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ace2159930cafa0786e384c41cd53069ba370c0c087c06f052c5f1cd0198d98d -size 905000 +oid sha256:e0fd88ea853d94d0c3d7528ea6d1b8fac684a7d87a8ceb4a2536daa6d0aebb39 +size 956012 diff --git 
a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png index 6a0f4c3a46da12bdbe280d9e07ff0ee00ce4131e..49f05c7dc3d4957950782bfb7b22f0e706ac7647 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b071f3c87e852757a370a1367f41c0fa55a58289c90d204f8aa2e9043f2281e7 -size 1326832 +oid sha256:6950ba38e9b5e2923cbc61c366744956755b94a6fc9b729b4e6c1a24ec649fc6 +size 1241784 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png index 23ec6850bced59c162af7ec5d615b25a787c6e6c..6064d6fac5c0c2c947a46fabf23c58d74a82e2ce 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8556a36d10628092f9af39e6ea463ba13a3e0902c140b1eb13c97f726b8e82a0 -size 581348 +oid sha256:532fe90395b54599e4c20c1868b86abfb5494cc33939cc4f7f4150251d4132f8 +size 575854 diff --git a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png index 5b4c5c1c910b3e5998b069c1bb36fa9bcdf8883c..8dc7c61d768b98adaba9d490d22ce1e7c697d89a 100644 --- a/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png +++ b/images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67ee4e8a0e6c681276ee9df52164200740748dd07a8a98c4ce8717859effafcb -size 971874 +oid sha256:4bc3b1f94b949989afa428ac5284b3f1e3ae4608c9ec8f7bf6944b42cdfb6e2b +size 1303883 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png index c28f931852df6c9e5c4d4b31347b2bfc67d06815..a277fdbe2f15e4520103404124bbd9cb801ca197 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30e0df2527c528e60002f4f1bcb76317ca23c3f24916003ac49ec48a476feb5b -size 1473727 +oid sha256:4df4ad7a0ac1f8efc7c3322c4b9c7ad94c23b4c35d208d03af5718bb360bc7ca +size 1548512 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png index a1abf0cdac8155d2831717991db05933ef2d11b4..0a82ca555c84993c0c740e21eeb4bfb656d00755 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3d65e17fc8df7515e1b91c82bbd437d520167c4b9c8cf3b3792a6d0723fd1fb -size 1438453 +oid sha256:f507f545c5eb9d26081434efacac0e6e39f9b8e0a740cd06b75fd0f38dd46fcd +size 1520246 diff --git 
a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png index 88b80772927bc34b4554bf62e5efc3f3dafed6eb..fbc99d1f4ab1560bd2bc9c1a51c8fa86deb371a1 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7f1ee065f741cdc0adf92aa7233c48df3d2c9aa64dc6be8fc41bfaef6a3579a -size 1458174 +oid sha256:1ce90c832992f9c92a353026339259e878053c0c8aa37146232f7b196e3de059 +size 1473118 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png index bcefda5ecf64372ddea17f3494829ed1f71ffc5b..e79cb772453386f75bc76b2652d8d55f975b98c9 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba01df1e1cdd5e4f525770153742a247762742a3efffd5d12f115342f5f9e46b -size 1534407 +oid sha256:321100ff8f679a04732f8ad827176e4ad1dda34809d2e2b0a6387046b8920b7e +size 1584351 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png index b9c67085134fd372077462ea6d990f2fa648c10d..358f7bd293c0c0866edfc06620e638633635392e 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a76eea3c152c7662419401e559023e4a95e4e8253a4272f388301be0561e431a -size 1475475 +oid sha256:917e987eeca361a82a754723cfc26c4b3ea0d2a7023c708642ad59b682c8f7a2 +size 1580281 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png index 0de024527b94d658bd62d6f40d38135f37995d5b..a12648d7eda06746c221a70b17d51fec366e5a0e 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:092dc5c2175230d66df6f6f492f3a415d544c9705eb51fca1b5a9f1bacfa530e -size 518978 +oid sha256:5b7171f3514a4125de117152444c582a5dcc64203a4e5a61f4b62e87727535ff +size 551397 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png index aa679b01c32319602fd09efe5015e05d1f0eb5d6..7174cfaea29a504a971fc61aef44e67817bb7122 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0f52eccf2e79445eab9917880d518cf260e0f95eb18d9c78f02f8a237d82607 -size 1473501 +oid sha256:5b812d900cbe9b89cde7dfce50ac802678429a0a5459e82d33f97ef01ef606d7 +size 1472537 diff --git 
a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png index 38153fb5a7382b1158a3bfa3a63c8b3e494a83a2..3a71a84b27bd78f2ae84641ffa75e63a62fb20a4 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e48f986e502b6f811a82ce72f4d135a89f76d73184e6f6f65b5a58e2ffc2ed84 -size 290762 +oid sha256:785e3308712c26ca2a3702190148cff71f3a699e095c97f61fb78e1548feb8ab +size 226186 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png index 9de82b2eb3c21b56a0e8fb36c2e4fcb6bd0559a4..423af4f2f205801d6164714f661d045f32ebd758 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acc3dfbe4567b948b969b214323674df5a471ad93b97241c64134261d37dab90 -size 1473553 +oid sha256:9a7198d74b532a31ef62ff0bd367408e7529d807a6ea2fb4a80a8ee748bea506 +size 1474389 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png index 2ad71c63838c8418b699a13db913aca25fbed562..b2b7397ef8c3ea0a5329abaf05948cdec542873d 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54d9445ffebe22142925201ac3326cc40bdf3671f420e4d0e36ae26a84cdccb3 -size 518184 +oid sha256:8fc5775186769ea94f468c4c05186d1e0c3e3d544104332af7e919630a8b4bc9 +size 299435 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png index b40a35d8dac1d5ce52f24963566ebb11e2562e32..14c61f36e04238334d73d06c25dc2b79d4c6112a 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61a9ee40d5b30c707107eae527620041bf8956880710b1f0ddeb2db4d7ddf39b -size 290760 +oid sha256:903db7a52f2d76093fe85d016e5a537a5ba3f8c6f6a18432dd4fe82bc5156892 +size 256991 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png index 4930407d17dd35b8c610e44556b2082877cc18b4..1d40bc2779f862a8361eee8989734c6258e708f0 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ec1e77a003e8b7ed6b22f94b4da93bef63f5a34c5b029176a69cd532009ffd3 -size 290843 +oid sha256:3391a0a1e10bf949e7ae7c45874735ac988d2a541af00260bd7775552a7afda3 +size 324130 diff --git 
a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png index b95eb65eb888bd404cee72c598e917c142966a57..9095bd6ec7f1b232738476192d505bb35508e6f5 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e3ae1cfccfa5210cde26b31c5d5161f14563fca913cc257e3676b204d613484 -size 290838 +oid sha256:d0bc99a135f88f3cc0b0e78568b4fe5bc142f90d7dfcba616c088ce857d1d502 +size 333625 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png index 89ebf441cf989ca29fa0a9c1ddbc340e85a77e7f..2ae9059abb8ec51a433a8573e9ca09b2ec56de27 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbade79b3a32aa44452dfd155492c30a26f0fe41c1328682cb1ccc8b1026665d -size 1524046 +oid sha256:fd12d26401ab176a54004e9966413f2680c829dfb4e3f9aa418a46094b655425 +size 1525314 diff --git a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png index d5401118ed318ed8fec9e3354d1064e2a3cf2360..08d337d83784222d887771699131cc66be8e1bb1 100644 --- a/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png +++ b/images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c5796d89c32d1526c3a6dd7eee62d34c619ab912ac2eafe7f696760505cbc1e -size 1413271 +oid sha256:3274df3ca9f4fba554eee85efcfd35569c5e106dfd7b8eb9318cc8b05570ae8a +size 1458335 diff --git a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png index e65e5ea5a346b5d2418847492eab59f66f7ac609..34313159606b41751f1670e68cbddd502c3a0bd2 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06989c55ae7d676326ac315afbd8a52e40b0d2e6d29a696b8e5adddb50d4e17e -size 790245 +oid sha256:9fb5e8d8977708e12f8486c8769d78f293e207dd44c5c4c01c76ed12fa404e11 +size 837650 diff --git a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png index 28c1c515bfc4a0aab1f16d0a632bbf1194009d93..53003f355e66a7e9f3fadce5ec828415c60c96d7 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4b482754322585a89bc34b7bc7dcbc4bfa9eba52686d25bf0c0e711b3094a5b -size 774528 +oid sha256:427c3912040e0e45f4cc5fcb45c6cabd4e4c5ffc9b7fab4a0cd063f2c81fd81b +size 679503 diff --git 
a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png index 06149d7d906e5c1eee37d0117b6a4e391e426b3a..ae07220da5b2f8c8b80f72ec8b02cc969eb05b39 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d94ce9663e2d12cdfed00b5ee5dafd5c7108427a34e09a96cfb0919f9c35d06 -size 789015 +oid sha256:e34b5ecc4b94d961ced1a665f9131db8c03ef34cc692e0424cd6b53d4209578d +size 744550 diff --git a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png index 5e6adb81d6a8668f636cbe90ab3ae031d02e1f02..5cf67b2f589288ac8db5d49797ba7b9217f3ffa1 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8816177a8a6e902294af10261e01968bf5d70b3bdfa6f4910cb75b1e980473e5 -size 844036 +oid sha256:497ddb679cfa85ed499daf5abcf002c83e5c36522771835c82ee67eb949f255d +size 965281 diff --git a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png index e9fedaabfa3d3fcee8dd3893de967dfcda3d5582..f72fc6c6d5cbb2def10636fe953300066d116975 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de392b16cb62db97c890d5808e8054df60837b04d058ae2126f039763ffc1a32 -size 813364 +oid sha256:284d8ebe31713ad998ad050f17acc81b990104a4440bb801b0cf36c32301853a +size 747178 diff --git a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png index 89a16983da884620b7f3d924d1ce6ef40b0ec366..9be704dfb825178f1831a63ccdb501950dc80ef5 100644 --- a/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png +++ b/images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9be9e3b3c4cc185348858b45f91ded5771034d0fdb2e827e88a474085ea92a8 -size 774669 +oid sha256:7606673a80dd27aaa390111813faac874ff2bb5ebd2249aa6afe6c1d723bf522 +size 777197 diff --git a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png index 4f1b4bfa7b0158dc157d17a38be3585cf1091e43..114be8b0d8113ed5dde5fdb119c3e4a2adb75ecb 100644 --- a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png +++ b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:875a472dc6b7e42e1042b88a37b777385624a7ba5392cd01da681d5b6638dab4 -size 392635 +oid sha256:d2daca372a7ba92a6ebb5217fbde293b780fd06b17b5c09620751adb2b5fb58d +size 443747 diff --git 
a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png index 4d77414d0850686747e104b131d1702a9042929a..a67352655c0651eb8a7262cc5d8fca95e748de4b 100644 --- a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png +++ b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5063bd189742952dec092dff558d2ab2d4b137f83017b464502dca1d5ed6dd3 -size 1998667 +oid sha256:ccf7d80db269395595ff900a54f868883552735e0b41e1a086e439c848b446ae +size 2036328 diff --git a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png index cf84992d4e11bc4e715e0ada1d7219a33ccfd80a..2cbd8794afef0e97f141fab3163322c330d9b5f8 100644 --- a/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png +++ b/images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a2751b09523936885d278fcbfd9fa0715e06614a5ce4e144b28902f5148bdbd -size 1755739 +oid sha256:b6942a98400c3b61b16b00c5de6dbd1bc0ad3b8f6ee7159b26095d4061b4872f +size 1849481 diff --git a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png index 936f96683025c5869728706f4581e541f9745e49..f5730190e030ed1804e029bb18a1cfb3f239c484 100644 --- a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png +++ b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19a96f993282c9d169cd1b120991745627d04de6d693c2b08eea8f06f529a180 -size 1282203 +oid sha256:8c9ce8d76412072dfaa54f1bfc76eba653d036a5a1a345f984fdd638f1107a8d +size 2298992 diff --git a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png index 6851be63e612c4687118395aa91271090b1f4cb7..5f2df11649a06b34409048ac8fb500ac3851de6e 100644 --- a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png +++ b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2188376de0db1a780ba96920ccc463bc33c400527bdafc7725b62dfa8e8db456 -size 1882024 +oid sha256:e26107ad0739343cc81ee63667278ff78a5c2e9c006b5a7fc1140d0c0dce4fdd +size 920978 diff --git a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png index 4dc3f0fc78074fcf3d91d94a2724c290e8167b43..ee14991801069a6dac9b3bf3290a74dfdb83edf2 100644 --- a/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png +++ b/images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:922bc5ba74f95abf91e87fc503849955fb7ef6b3223f023d1ebcb97b3d5084ac -size 948141 +oid sha256:d6cd3509dae88fc0eaed8ddf369f290ba646e1e2d98ab4fec4aa8eecdad4dd56 +size 1197260 diff --git 
a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png index 1003eb7c1ca2546b4cbdc40c5700b2b8058c6893..19a310334b8a3d68b01416a40ed60bc5c2820c91 100644 --- a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png +++ b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eeee5cc51d4b2ea1251b1290861f1d81eeefc6bd99a67fd3296795e6719dee6d -size 38659 +oid sha256:699f207d5da3ed3dbb7056bddfba6facc17de631c30f12e09617c57e9c3b3b9f +size 37359 diff --git a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png index a51a01706d7423472455c09f11a82bbc365b57ad..9041be6b701891d6fb874b5a0801181fd13a3fb5 100644 --- a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png +++ b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d70752c266d333da962d2a0e76f3b8cadcd174531b777d01124e8ef46762647f -size 3517644 +oid sha256:5a34ac03325f9506d0f7b7e03a7003867882868c4055f0328705f98312d8a3e5 +size 1221230 diff --git a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png index 9365ad5b0b06940dfae512c2d0f33224818d892d..99f0eba139ebffc3a29b8832a37a84e85fb9cdc0 100644 --- a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png +++ b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71719e1930f0e42cdf205b8f6df8bf32348fb64f5ddee6a50c22b3a719514b4b -size 1920586 +oid sha256:10582ebb0d20958b87fa39696e4c071807c39d000418ee1654664a61aae64b84 +size 1397264 diff --git a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png index c724ee28d03fe27aacb6c42ac5421ed625fa2387..7c178429e47e71ba0516026c248d7f7bf775c3ef 100644 --- a/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png +++ b/images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c291d3ef7a66ca59d424cc10a9a154cf7e3a3b35f0cd33cffc02e9ac7dfad309 -size 827817 +oid sha256:5c25943fbaf580c2669201bed5af35070dd1fde2193275f0b95baf567f111b7a +size 949375 diff --git a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png index ee443c06465f870450eba83fe7369c3e15ed66bb..5cbdba7a32d97dfeb6f7533bb82d3a5278276683 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5916bae7c47f00986c3423a215229f6e7d1719b7667982f26b10b97a00cfddeb -size 1257730 +oid sha256:ff7ff0f733b66aa7dd973bc278a6262d191b087f704efe0b84147b4b3fb32544 +size 1106806 diff --git 
a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png index b324eaf1e9a149ee7539335c254c52338cb59ec2..b2a7e2db2ed0e99ffc6f6dff0b41e6bb8b8740a6 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b62e2ac34cbafa7900779b48b6b16a5fc1dbaeaa1a1835bbe53d1707602c09f2 -size 1337704 +oid sha256:6ea66a9a877730e53a897a9e6902bddadca5e151212524e652aa29c7c6d26a5d +size 1513142 diff --git a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png index 2cfdf83c541ae62ea5763a74f5b10d5b46a94518..031b5746665245325e146bc16dc92de2a56f29d6 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5950fa88b3490d3532fd50dbbb35540cd730974c3f307f48b6d43a36c8e63344 -size 323849 +oid sha256:0861bb137289060b9feab23967e75e098a26705426f9841b4cc516c616957256 +size 276651 diff --git a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png index 73b0c827a774d3a6777db46a60db2eab8b625027..ed3b34a0425bd81ff26eaf566d16bec1b82ae3de 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c18f3d41afff9a1cd2cd2a8f87ec425793194dd5df83189662c10f1c51f3603 -size 1650867 +oid sha256:fd83c24e2c2661b9320ac1474ad7d4cc5c0d9e7d2f6f2df3b30cb694f6c4b508 +size 1130300 diff --git a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png index a31ce80aaae7d5b37a564842e8aabf0ef751c0a8..dcb48701ce0a7cce2cdb953458663cbaa17a7a7b 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6f031a8ca2e32cfbf8038c290bc10e39704839fb4bfe16c11638257275542fe -size 584414 +oid sha256:fdee7e1f0019e74398b32e7d8b7def367f9198ae197718a2e3cab88c9c6002dd +size 575496 diff --git a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png index 5b1fe8bbe9dc61e0145735a043965557ad96cef6..6ca439712aa1e0e1dc1dc923c168802e354c06b9 100644 --- a/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png +++ b/images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67e0e8ab31fa721ec047f3a9875f95dba2f867e4c9ad9b0a2c3975e4241c7f7a -size 1304780 +oid sha256:7f2c1c968fb027e9fff7dc031fb650097f6f89bb7f3620056ac42527b2e64f1c +size 848614 diff --git 
a/images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png b/images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png index 3faef628ff9ac32747f4015fb9e64cce1fa7096f..3a584df9707d90a2d042baa63a1d11621ffed77a 100644 --- a/images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png +++ b/images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19ea5aed72ce2a67ced5dc4638b1620c033f4f5745800f3d3f89bc0a20982741 -size 29031 +oid sha256:0de96c3c27d4a39b21941ac2a9d4016e8c4671967e65ba1962f238d7599c91b4 +size 38673 diff --git a/images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png b/images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png index 7dce5eb4e3e2b5e226ec27333c418d17d21f60f0..7bc448815e9f483b9c55032a0628b8c443635f45 100644 --- a/images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png +++ b/images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9beac312b66d8f763a3458e235b22903807777fba4f449b17efe7492cb563655 -size 586232 +oid sha256:76c5e979c744776cb5aaa09b46aae0942e65dfa8ea2303f04679fffda759ee66 +size 686546 diff --git a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png index e6e0817f03826a921de089256391afd2de5f56cb..01e2ee58099ca98d19c1136dc174d9c3a0ca232b 100644 --- a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png +++ b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb5f5b7a9dddf59ed2f3c82acc7b6b41da0e279867e8771a07520dfe844ea38d -size 475640 +oid sha256:17ffb4498bff55d0b45edf78b2e9aee430901482f32c53c97d530f7e9b3c603a +size 723181 diff --git a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png index 02180c22eec46915dfbd79bfbca0697ffb387d4c..4ed240fa46e9074f1ac5b30a9959a326f431dc9f 100644 --- a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png +++ b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e35d4bc49aa09a9b244db7427279df3d200cafe854971e36bac1ca50af019c45 -size 471084 +oid sha256:17e22e1530f6ae6bc549d5edddd5a7de107447023e34050be593515b8e9db3e3 +size 867195 diff --git a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png index 70309cbb70f653ed75c8511a0e25a4b3ee53e070..9075d36ba6accc63afc822f8cfb17e4ffaa7e3b8 100644 --- a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png +++ b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3312375047d7af77e2e5c6e40dc3cac5882ff5737493de1b33513d2b0bad3193 -size 831623 +oid sha256:54faf1a064077967c2efbebdbec27649ce7d5c4e1720e7b65f14651a5e049a69 +size 575357 diff --git 
a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png index cef7257ac5d0c11bac8a792a13cdbc34baf4ed92..dd5b18694a28475d4be7626c27461bfdf72e4b93 100644 --- a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png +++ b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:500e9ff60f30b61a85322195c3767eb1bce1b1cda63bafbca5f40bdfb84bd8dc -size 589078 +oid sha256:86f556452d4422c789cef510d038e78b445e369b0d84814ca54864d2bf23d3a4 +size 835547 diff --git a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png index 0e6c003b28ca84a6044ddf2a295702203079f090..f800a3e32f641cbf1cf1efa4c32f0a84c9b06042 100644 --- a/images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png +++ b/images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e9050db38a5d5a8ea20e948a96ef03e2ae000bf465de3f17dbddf4fce19fb8a -size 585679 +oid sha256:a1ecbb8b4865a2ce43a6511b499409d561cbe2319ddbc8d16503ca21de098740 +size 667971 diff --git a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png index 8407eb80421c8aa9f7d871f808d9d4d46ec627a7..d2b1d703ebee89730f484a17716dfcd87f633931 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f9ffdabcd650a8dbee7ebebcef16e77c0dce839f4f35a9a96d2721fdf092877 -size 1714035 +oid sha256:a840273f050a57924f4c70dcb155f105ba8891ef6b26fbcf3009896531915af2 +size 459651 diff --git a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png index df0d7f344f560ef987e47abb48c30ae74c3d3e29..ed5e57786719d637119cde64c441e9cd87508488 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0c25df097f8a8afd94c6806aec738aadb1f095943254909418f3075e28bb391 -size 1874455 +oid sha256:e578a81cfa5c7dcd80474044738f68b17177735764f0813f487f4beee3fcaaff +size 1076502 diff --git a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png index f6e2be7239f705612b73e7a66d71818698fbb5cc..bd44e7d9adc359e3d6126fa290e86f4b3a6a6df5 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6f5fd5f26db980024852d9aabc5899026b0a561264985c8c4b9bd3087c84ade -size 2366062 +oid sha256:dd6c798009f6edffab71c0375f783bdda6562ea94209f37dab50afaead7f2a25 +size 411877 diff --git 
a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png index 8407eb80421c8aa9f7d871f808d9d4d46ec627a7..c5d162214be44f8c341df213e26e7b10c57ab64b 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f9ffdabcd650a8dbee7ebebcef16e77c0dce839f4f35a9a96d2721fdf092877 -size 1714035 +oid sha256:edaaf9d221386c33dd56d8bd7b09c27955d0115d7e849b8f73ffd7a27a842f35 +size 665638 diff --git a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png index ce868557cef0a2621757a84b9c0cca8107cda407..9b543bd4338b80bc0951ada4b84a374c41e80862 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e936b7e37a7ca09bc4b75c0113b9490c44cf22e8881ca1330d3a619ed0949dea -size 1763203 +oid sha256:76dd5fbd4401855992bf2e6bae2a3fd0c91c4ed2ac58e58c547f5ab7f28f042a +size 442685 diff --git a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png index 16688c2033c15e8e4a11ba453b3b8284aeb4bc4c..3c97b48bd3c9f655f03862f1b9641829e1eda3f2 100644 --- a/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png +++ b/images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:600d6dab7ea35727e000544a67f2cf41b8c8c794c50b1cac84cfad1c650f11db -size 1029770 +oid sha256:e15a9601c261ecb5578f08b6e51dc037317aaae725b3965622c37a837d8b2f84 +size 464531 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png index 8ef14b18a294ad1a17f1c8d5a7bc0bb1d44cd028..1a73bb299a0a5cd94bb554982421292a4a3f14c4 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78941123ff755a8d1d333e6c0ddcf1dd8106829a57104b0c8bef84292272660a -size 938319 +oid sha256:a40af079f7dc7c4ec5d48a293e43bffb5ae06f9611b344188b44a1189200057d +size 770809 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png index 86afb1445e6656ff6f7dd380f20b308d6f85be26..8779c6e958743003c94530205bbf491015eff933 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acff6b714aa5516c6685cfb0a9e96d1fd901a9ec7df859a637079e6164fb8d73 -size 939044 +oid sha256:f02d82329def4c56fac9ab4c3ec7220291402098bd78c7a359fcc067383cc9e4 +size 1260245 diff --git 
a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png index 07dd8ed6558a714b85ca02bb66488944e8a247ea..0e54ee037b9efcfb293a659875e405bc097814bc 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fcd4ba972758ec835e9a04ecdfdc6c4914b118cfb25f4609cafea1edbe547c9 -size 668811 +oid sha256:7b8fce79753fda458826f2d2ea3f1c9ac74f50a2b2306827f12ceb442ed3c558 +size 1230877 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png index 8c9eb54688307315184c675b13e31063f37eefe0..20e2f618d7e255900bc2e702f6f3a1c072d54559 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca5aae5d72a19b6dab90b0c2939e52e25d82c89d9df2d70988e5be903773e522 -size 939217 +oid sha256:6ded7e6436bc5ee094b57dd1bae20189220d7ff5c51caaed370a5a1e640dcae2 +size 769797 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png index b9efa532088890c1f5165d03a3b5633aed7c4243..2ad18c064630c2fb0dae555d321892de2ca067c4 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1d0f30cc79a9a13165ac1559b0bef96fb2eb3966dd39215e10cad836d4e617d -size 931292 +oid sha256:8c892861a02dfe8befda10ab9768461f07e2ed821c3d423a30ecc6ee88366776 +size 882940 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png index b7ccd86acba997884c5b5ed5c8635d156dae6c21..b28bba94fadd23bd0cd9d0ddf0e2e8ce7922575b 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ee25f1b04e21c9d0aa8dd3415c06406361d46b6f12cee56cf1ddb53ee65de68b -size 675657 +oid sha256:3d8ae9bb4bf702a30b534e203afef13c2eabb5fc91099a29e6e31199c116b1b3 +size 1031724 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png index 9072994dfbcf2f983a29601b7e20fec9da9a5712..b0dc164cfa701267f70d961e33cdbb99f8cdeb6a 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30581c340e6e586e3b7fa5a70be9a8bee66a136e357254a38782eca1e025b12d -size 1290386 +oid sha256:400c7422c132716de3dce4cb2c1beb0523b1ac48e8f0952db757a61321618012 +size 843240 diff --git 
a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png index 394970fbed029309be707b5c230c856c28dd374a..79226c99eed234acbefedc4541d67a272316f2d8 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65862fb397f96ebcec1bd2e3f8f641cef86982828a43333590df8dfc842605cd -size 938089 +oid sha256:ca852bdb237da4c25c503f94046cc52d281f241c98a6564ea36b9376254a6341 +size 1219416 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png index 0f734500d66739e475c3bc3fa0dad34dc17840ef..7bde34cbeded619ddd2ce6ca770291b9a262998c 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87b3f912ba08255b4e2a9230c3252f61c2913db9d169f7cc677e490e88ddf33f -size 931107 +oid sha256:2241cb181d307cd3b48e27b7fa52dbedcc0573a6ff436112ed2bba7c7b684b32 +size 1483386 diff --git a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png index f4628130a60937aab92b55158d22bcbc9a38eda6..3f85c5f70c55abd9e58089191157d5d583a832c4 100644 --- a/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png +++ b/images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09c66b79999acf09e041e46fb02e2e7c7f385039157ff960fe61f388ccbac4f4 -size 926006 +oid sha256:9f222c975bb5b9c0a496a5b949266d466e1186602e6aba17239f2fea5a01cbab +size 721566 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png index 98627edaf1df2694a37a8620e315dfc77ae4eeb1..952d1ae57d909ec2dff5f7b28bae0fef14a95e06 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7f96627151ca27230dfa5be3801136af10d090124d84a2d25226fc7f12b20f8 -size 636944 +oid sha256:bc4a3e9cd7fa04883f6d610753881874e58ceae1b6b7289784459d9c6e1a4fea +size 794692 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png index cc409f4ad47e2d622ac5dc63b8180a1ab4e30bc8..51d75781ac9f85b0c63618fcfa35e51de42ae4f1 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c234f6dafa2df6e1355b742dc4706640ce9198328d3df117583fa77e80a982cc -size 594269 +oid sha256:12e98033803192bff8bebf0eabb58bf8542a71045b33b61e35bf8bc9247ee71f +size 631402 diff --git 
a/images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png index b07a64d75107bc4968971332319b8162e56f642b..d2bd4973f4db812262565df7957b0391f5439215 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26c7794163862d2ae7507c78990702de16d606be62648ed4ff6efe580a4fee61 -size 479922 +oid sha256:4dbc00fc0f848a4959971c1fcced1b86bcb0085c2d53e4abb4ca70225a2be7af +size 980438 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png index 0a4a7cc9f66699287f5428db0194d6129c894e0b..7e98849e7bf582ac769c039bd83079bdf21e62e9 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe3b4699e3ef2eef958182fd798c48c9da9ee19b38dfa7db10ee71235ab9a8c0 -size 563593 +oid sha256:e7ae064267570ef7164df11ecb8e9de9d3f0ec621e0eb7c804db101c34148a1e +size 1056021 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png index bed2a0ba6907bd575f7768c5107ed7fa52b885f8..e0b14f950816ff9bb96236ecb900c31d0e312668 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93810b1377dc7aec52890dcb6c2a3f1fb14bf2a71a587e66cf3019908da83d67 -size 990217 +oid sha256:b235a8a120592d200fb9b7fc83e4027986f64b9e0a5672be882970244b0317df +size 969288 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png index 059f3e72bb9eb85d2a5e25dc1b737d16bf1c88ea..df3e208cb1bdeea026c69e9b8b336a6b975f92a0 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d8bc23d4bf247fa0683e2a65f607a63a45f175b7ecfd4941cc5b85e30bee451 -size 542810 +oid sha256:ebf0044a242359a6847c3773372143ec1e7717452269b3b08fae293a8c9e0b5a +size 627721 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png index b65ca376ecb206dbbca1c47e8991b14e75160781..3e95c4f230799e6394a2fee8cfe9c007668a6fcf 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c672b42f99499ffcec62afdeb8ca62d6c3c47617d7316e6a286095bbf3bdfb3b -size 580891 +oid sha256:2d8d5fbea3162db874abe45777a58304f2ca1324810b80fdba40f1cd873f166d +size 631769 diff --git 
a/images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png index c232be45d24310afb3d8c4802f099f6abd0e6268..59cd5f6c28bc226e12e3474a04801038936f14f1 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe0c201e4e080494f951dfe35db5becbc38bcfb06450027612e299fb5c4dea22 -size 823322 +oid sha256:89453bb7b679d41ed9d2486d7705c2ebbb370a1e3ef597afed300de8279e6e92 +size 646231 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png index 796c0f98c700adec9de0150ba4ce6fb88cb2fce0..db25fde9fdab44e64cfcb938535ac3c8b4df2880 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:536ede11f955614a0828f48636e3878c73d8c8f9b46aa02195357c23dfa12517 -size 655753 +oid sha256:1064364bd4541b15c72ab4fac359221380d7d8faa4a9d433f2c3b4e873a84a34 +size 893003 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png index 42bcf47a48fbcf15b4ad62d7c5ea70b4ca084319..cc1e125d9f88919fc0a5cfdd1f032901c098b7cd 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e82e8fa8d6ebd7944fd793e659e86dcc715adba14a2a9df262204cc5c2e3cf7 -size 645708 +oid sha256:9e98427405c5f5c695d3b322ea59cb7507b687bfd172912e114640df3270b46b +size 709559 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png index 9bb3c766a3c3b5dbc08628cf1f60a7a4ee7bf74d..2ef44268892edd0a97392a25ab3a4beb04b46b96 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f7e07b4ce663fa29db948f9f64362f56f1d9d9f043aef17eb16230cc2fa0e38 -size 546135 +oid sha256:fbcf4a5906c17f0ea7872c84347a2b26353b426b2ad26e3c526a6707d22bde7c +size 394111 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png index b7c44c5e64e188a31aeba8623a2c131303b4a362..68d01f88ea79ea20c0a174ad4f6e983f6b030238 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a86e1f54634778d45444c1367c17983b4128c8d81647be9fd76ef2ac375da13b -size 563193 +oid sha256:56599c67c2170119fc8cf834a9878af3cd41a44bd89920b6b90e31f7fe6957a6 +size 650898 diff --git 
a/images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png index 4b1113d0991e64a1947f8ffade71de677a54a769..45b1c0a1c6ff567f4563e3b887dd0d7fa3b62844 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4850f6dc16b4b42e44667891cee82f03ff0376d7496da0ae5d0de0a3b9cb0998 -size 565322 +oid sha256:cb00b73b23b9343e672a5ef12a153927d7615ca1d4f16b5d4a053bf6cdd0f966 +size 509379 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png index 1686adcb017003b38c643b9ee4b4ad7faa8b8536..e82cd57f736ad451414a1d868cb311e0f55df7e8 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b697e1a15cef8c4ef8731dbc93cc9024a205fb3859a87328122020117e63ec18 -size 566223 +oid sha256:0296d96345b75604b532e6149d10bfa4a29bbb8c79409e78cc6af7a5c00fe5b6 +size 693467 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png index 95feaeee9b50461f0cdf5379f8c535416a224536..cb51c3ab46a57a0eea7cbdb81b60e9c1aaa49c0c 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9cce24111f8d87fdd51d318067150a79dc01a06aa453451ee4257821f767b94 -size 526655 +oid sha256:5d824f4f76a0f29cd9a612cfcf39017e38b0afa881e56add8b3f8e605288b547 +size 762017 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png index 597649027e87697f92526566361cb54f965cd78f..5e13d442c683591891a844bd79e2b648fc81dd03 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ef55eaa7688d5b1d9178cd588bdf1e1e6a8756c6613570c57dad2e05e196232 -size 544404 +oid sha256:e45197dd5dc38908ec02b22739a975d0969a447783ba60e5b8472ea3da90c6ca +size 435285 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png index dcd6daca6f601a4804bb622efeda7bd8600562fd..82180c226d7aafefff845fecc8d9d88cebc63726 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18ca63cb81cd50b498f7c9967d8140f0520a9db8fd986c21ec50460b6cb555e1 -size 607191 +oid sha256:e5719fa8d454e6458287d7f628cda03c07e704196e63cc85f4cea2cc8a33fb65 +size 501969 diff --git 
a/images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png index f751bcfd69daa24c35dc6b5868bc375d93b462bd..02a75a70a25a4eb1755e6eb9e4c0d34e12394f4c 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:203cd54e5ea015a53fd8d1299567b8e5614dfbcdd61e185b0ca706ffae36890d -size 666123 +oid sha256:c524e627cf814fbdf6178396a19392160342e552c3bb9d5e3e09725cb48fc3e5 +size 670799 diff --git a/images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png b/images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png index 8442db9e007cc596f3ff64a8d739764231ef6a34..a08cb14be7bf141f3baf82845b356466d00f09c9 100644 --- a/images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png +++ b/images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:58bc3f2267bfe99c29860dde66780cffe7e54d9168a4f61e8a3c4f5a5957f324 -size 579549 +oid sha256:45bcc8dbfc3c3d0a2da0f9bccc6cd96bb208b85edab07ddab7be2820ad6c9fad +size 581161 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png index 176409f1031dbabe8c1b764aaf8904e9d7c12ab6..945ad7c752ed8710c93b55002311d2a82f59ba5b 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b65834db03c0effa194b3ed5792f5b8c63a4de46c9b2f2fb9892c23a836af22 -size 1311944 +oid sha256:45aaeab26afe9f37b65cd6084c4d944b22524cd9e6ceeb47f5ec88039c0912a4 +size 1402850 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png index 791ed2fcdc71522888a3a716eb93247dba4fa582..778204c4490df2400bb04df42a109721f84c4f10 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a58a4bf5b20c671d6d1753c49a36fec7919f7e9b14576a3841499269a75a1179 -size 1312742 +oid sha256:74ae1458d6e076e2e5388f73b0c7ce7d627ab10f18e30bb28ac8b4ce1d6a4937 +size 1536138 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png index 97dfb95c68b42e935e8eb747b1082951d6127390..94cb5e0ecb61644239a004b104ce02d9caed92d4 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68115de08867c5af31186e0048aeeea0326423ae6efa8fdcbb8daf90af8c82a7 -size 1388964 +oid sha256:901fcb0c9380384c61c8b097be86760102dce17bff41257277ba78790ad2b8b7 +size 1538797 diff --git 
a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png index 8eb34aa17420f1c6186a2e4a58be3a87d87a5b15..2ecf5719592735534e70a8092757cc9cd256b0df 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cfde6b19935343f6ab947947a7c112f16d613795c9b09af9338c5ebf9b93a23 -size 1303943 +oid sha256:b7da328d6ca4ce0d698c544519eaf5dbcd354d22ee63ee46fa51e739cc1804ec +size 1542159 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png index 54e81717b8028c3f650aa979472ec1bab0f4c250..68fa805faf0994bc6643c501a3ec2e179d85c6f8 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb2a71ed33e9c538f1a5fa3e644964fd8be0a73af0fd953a061a982f7ad348e7 -size 1311173 +oid sha256:66f778098778b7f7c697429e5c53723f9df6d4eb664f0db6630806925b7e60c7 +size 1458779 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png index d3e665a05f17333af225e098c96aa5fc895732e0..2244c9ac40ca3b85f4ab15da85069ccf70b4f1bd 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82cb077511d99a962f05251534c3c35594f25be9343bb2fcc9bf1a23cb5a91d2 -size 1373972 +oid sha256:9e5c710ab1c5e9f0e990b0d31ec23bad31c7be3e20d052d36e9ff281069f305c +size 1631559 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png index 72de0e846599b3fa64e32f3df673e6805349793d..ef498f735792f560e0113fff34bc5348f48ba376 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b364bc7bed0a7f356a0ce80d26b852c2e654ad1ca7d324c9f6bd330a8e6a4eda -size 1302436 +oid sha256:e6a4aa83fdf297c12e931dbbde0debd1164ee26e8e135968c9543ea2f8562bbb +size 1492797 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png index b68ca2a3186cc1f799b69f35e1b4f0202f06a298..6251bfc66c3588834429c06980aac98bcf70fe16 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:982281daeecf3ac3183ee5061fcd80ec49e143216de2b54c3a7527d1c5ba10bb -size 1428384 +oid sha256:dc1cb176b516400f8605eacf0ed8801b304a0d0e3d49d6ca410ae5ad36d1c340 +size 1700470 diff --git 
a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png index f7822ec7346eb325c194167a7bb4bf161d263569..05bb0cca60c7941c9655a94bbc4429aef4bcb080 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:313ae78b5ee45f9c9324067ada97cc89f6926499f3bcc30ac5a6e9444ce77e6f -size 1303259 +oid sha256:86eac44bc9ebdefed6a8fcd0188b150097d9c0e6c749fdc707feb67956c5a4ff +size 1376369 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png index f7312908ce8b95e868c358f563e8411205c0512d..67d55f30dbad934a86f0928b75bec9e913898859 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7dff60e371016e8d98d6aa615d3a8d4075f4ed86bc2d45dd25c1168ed0bf4d9e -size 1412248 +oid sha256:546732beab14b0daffb1e42ea81bb77136a425d5d539b1a92013fc851b329b27 +size 1665352 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png index ffefaaa50c6d91eb1a9db5da72526559114f28c0..52c4152f5989ae3b7cc2dd19f97f67a95b34002b 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d8c80cd62080c243f2110b15f25b7a74d7cc1f52cd6daa54735d10a068c410e -size 1412592 +oid sha256:04b384131a938819bfa781b36511a2679a1e2e1eb09f3cfc7e700d9d9df586a5 +size 1693081 diff --git a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png index 36743ba0462e89fc120fa1b0296cd672593a29a7..afe7f35128f712f63237f7da77c5d1014befe418 100644 --- a/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png +++ b/images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55927dbdc6af422908729c13846b7b681365e76a33c33c259d35f47de2cc19ed -size 842271 +oid sha256:d9989c2b3ca3cb1fbe372f9bb719e36ae2d58b493608687720f7c2451bdbbfdb +size 1011226 diff --git a/images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png b/images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png index ef85406e14bc103a1952ccabaad2bf833ed91dab..2d83ef0c7517c0754216657da7bf1f9f5d18b219 100644 --- a/images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png +++ b/images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7326ee1fde17ba9146de7bb2c6846fbe43a718ec3a9e0309f92a9e85897ca91e -size 2542234 +oid sha256:9984e27aacb822184f5892c2de2b3b99b61409a70350cb77c7c76ffd247c54b3 +size 2643898 diff --git 
a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png index 0db85039c89878950e0eda98a8bec0e578326046..aea60c97bd28bd847efcff0a6aa704ae12568ce9 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:633550a2ba8822e0e256a7a7a8c066b9980df396e703b07642f7cac3f7408131 -size 1019663 +oid sha256:ee2984c1f5c7e77e49b7b9889275fc702e083ca104084dbe5e3e063596ff329d +size 1019647 diff --git a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png index a80f745fa6b80a85208db7dd4ec87be955045ec8..8f3e4e74a4b16fa7191a0e36dac05274fce4ceff 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb9b4bc6bddbe2a038f4b6995beadc42818db788668eca5bccf61b3e44814b60 -size 585335 +oid sha256:b6d9ba2904996698324a0e72287317720fe636ddceaaeed60e07517b39734f3b +size 971294 diff --git a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png index 5151c379050157ef327095207c223b01bffe7250..62f49e15b447f99db3f2e3b194402abbad163f1c 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b691deb300a29cdd4bbb3a0d29553551060eadc7951577e86e34620894be9fb -size 669021 +oid sha256:bd6164ca4b2f93e623c2c40f396e84d7d611d8dac70ad9bcf213132ed8db4838 +size 922103 diff --git a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png index bda292375e929c587e6e8c4df87bc1947b3b278b..11d9ef96d011e8053eef156531d2b1b29495efbf 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c36fe1f8ed9f24116f42a9debfa41d415c5118e090579fba5bdbdf7966b443c -size 1215423 +oid sha256:a23608a8a90da7cffc1d59f93b624c98cf0d242ffe178d320f9587cce1958f46 +size 737527 diff --git a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png index 41049c353c07b2363af9960531baf88859594f56..28efbdded9f6f5540b398ff26edfbe188d84929d 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd9ccfd8c989a16e7d93d2cb8a696c045ff315af94551e2d647eab4ad3dd0a1f -size 991721 +oid sha256:cd160557b9deb992d6e683fc1e7c52ba64352206abea63f85154355de556d32b +size 1197018 diff --git 
a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png index c6584d333f52b9af1dde2a257ca4fea938ab827e..a5ac5d7040d5ce51a972d95c3e7bb0806ae06009 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5cebbd38f63a7a95b814c3b3506ed3d99c067709dead9791878594d6c2b563d -size 650490 +oid sha256:4b14f69ea132b9a8a0f983c886a4c0dd9bc9783409c2788a5071517fc7aaed98 +size 804461 diff --git a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png index 823a68675b6471b7eb60d0b6dfa6a471ad115d5b..4791e25e6031ef52aa87b93bd176ac6e0f16e5c4 100644 --- a/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png +++ b/images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1362c8899ac15cb66c6684881954e75ea8448fa3acadfe6063d882b4dfff267 -size 969131 +oid sha256:6c3960f8edd56f768059b9382575ed0f1196e58bdf2467b6da769d8cd5d42ed6 +size 713699 diff --git a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png index 65a96e33135f0aaecf8b033a609add60679ab050..fdb6b69588cd0855e7d704b41f06efb07d9d9e59 100644 --- a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png +++ b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3332592b24bf27b46cd2cf5a7e7129070118de626550216700ecdbd03dff990d -size 1275535 +oid sha256:e3601756464cdf3b3dc763fb437870db5174bec9f7e14bec19f9fe7d2bb4480d +size 1311811 diff --git a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png index 4818689bfe35652d0ea69685d1fd12fa425e15d0..b56c7a4f631cc215be4d18c20167f6e200b75e40 100644 --- a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png +++ b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47d5ca9e5cd4fe1151fc4afc03026e86c7691b6a4fb78c79ec0078da9bccc418 -size 1094373 +oid sha256:ea2b37fdedb4736d8960a220b23cdf714cf19ab1ceded130495313080f898038 +size 448404 diff --git a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png index 63e34e0e6ecd7a876a6d15a5bd2b9d48826dc35c..08e47a50d967e87df6e9c36e7fdc0caf8f6685c2 100644 --- a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png +++ b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c6fd04f1d481890cb6f5a611a5fc4a9acbc6907207e861b1d574f620e923b7d -size 1368753 +oid sha256:2c79ae23bb6e11bf755286bacb94f5366d30a52badfb6fc4e4f1ebad1d6b101e +size 454877 diff --git 
a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png index 0a64a8dbb078ad589195aafd8409fe137bb0bae2..827840ebedacad5f59d0c98a294f6b0d353be133 100644 --- a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png +++ b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f89e4940ebfdf93f1413362cf0d1ab7b837b2f286847b52a5c8ad69e9935089 -size 591911 +oid sha256:65100c996c9c9e34391127dee7aa05e986c7e98ac70cd3df08bfd1a33efd1881 +size 593954 diff --git a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png index c6c36ded39604768169c7f6b10e35e21567f8204..dd38902c1e1638da23a82f44aa3f0c76231410df 100644 --- a/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png +++ b/images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60b86cc6f83d0fbe6ee22773744b89e405ee3c16bad3039b51eaa46adbb84059 -size 928256 +oid sha256:c9b561c84a4c92f910f2081f721912dc265ef0a4370398b308222119b9fda78c +size 1273102 diff --git a/images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png b/images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png index a28f84278b7fd5162c6b1325d4aaf2c7638c4718..5d21544edd35011385a42f13f628d08d947a4a7c 100644 --- a/images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png +++ b/images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb512589145c86a7a62f7b7ffe96ee02be410937558a9df0b6a2c3899a2512ba -size 1246630 +oid sha256:a8215b4df8b86fe36bbb5fcab859f1f5af94c08ed973cb4af20bb680bfbff71a +size 971182 diff --git a/images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png b/images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png index 0011ac63ffd4f28bf0b3b3481208d6cf303b5c03..1bc93bf431c1072acb6e334e70e97f56a87e5e84 100644 --- a/images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png +++ b/images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0ce8b61a1dcbc27c47abd3ee83e665403401ac4156c690bc0f193b2c41c26c0 -size 1387152 +oid sha256:02a493735972b4c38345b377836afd36abe30302f115050fdf0e5fb7db9a6408 +size 917353 diff --git a/images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png b/images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png index 8ed9d116873e4c32570edb600ea386d8cc77b249..4f8b9a4afc5e9e3d9290dc895f029baadd5209fc 100644 --- a/images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png +++ b/images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1c136f7f0f3681dbc89b14a91a49f81f31d01eb444fb16ea28a3bbde95c4a03 -size 1745935 +oid sha256:1a590cf1abb9a8bc6e4a9fb911464801cde9674d00e6c002dbb5ae33c17fd670 +size 1785354 diff --git 
a/images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png b/images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png index 5247ea603c348486a7ec04807815fde3696f3e65..3f4cdc9a3b56fa1e4a15a43d772078678d4a8efc 100644 --- a/images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png +++ b/images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eca4b751ff80f10e9e60c2911c681584fbf84cd90a1e7842a71bcb0ca71cca9b -size 1930172 +oid sha256:6fbdb151f667bc55aac911c5471a6fabe6b566c73a915202b89730cbb0846f89 +size 1270224 diff --git a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png index ff17c90271a94ad672ef0319565e55f1750efe77..38a5b9f9a19184060f7c7edbb475d9dc4b0bfc4c 100644 --- a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png +++ b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ab0bc1cdbda87b1ca17114f8e08af15a8947b9e5b96902d54c67e9ce0fcf99a -size 915422 +oid sha256:4186939e6482baff4bde65080f43fed64d968a11bace6310c09f0775740d349e +size 907322 diff --git a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png index 68f84ea7b4be86c9ba3d2b22ffdd22bf147310c0..6494792857eef04b76fd0ae629431b0c05fc54c3 100644 --- a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png +++ b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e57aa04e54e34df9b86ea74bfc6ec4b424a3799f8ca4036788498c6f8d00ed1 -size 1697128 +oid sha256:9674a3b1fa351fa4afa964ca2710903a50e1d6cc8ac5bb2a47d4edc6fb818763 +size 1902152 diff --git a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png index eff24a23909b85f92b3c5f8435addf9e017d4ae7..2f0bfab3f6da69544838032a354500885c00d4af 100644 --- a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png +++ b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:309edddbdc55e4f357be5e62934eb68e7c235012c435ad3feccb355b0ce1b8d9 -size 734638 +oid sha256:4b1b0643a005267f33e886cf10a010fb1ca650a651d00fa9b602074853de5ce8 +size 663243 diff --git a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png index e512c71b1394dbccd73f5967bcd3d4ebd3bacd31..bcf25f2a419119059a31f8790873ada6bee5d295 100644 --- a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png +++ b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6176887b05674859e6b7cf5641b8500148b0cc3f6e71110679ac707738b20d70 -size 516264 +oid sha256:e6fbf2e70e1b2fee6607c6f32a08ba813ec407f6fbe1e0dbd186df0fa3a4a61a +size 575509 diff --git 
a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png index 8e2293ad4cbbb7b6a29da039568613a269cab82f..b99fcc895daec94b4208d7d20f3869aa168769fb 100644 --- a/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png +++ b/images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9be562c316242b14377fa9807ed7b9898e27f4c7cf1be37841373d3dfd25e977 -size 808759 +oid sha256:f57ecd4b20149382bc0329c8ef61258a538057ef3cada3c2a7da616a1052acac +size 667414 diff --git a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png index fcc139a0b1d9dd12d62d6fb7384a3f381f6094fa..466bd40889093466a8be2329cfb34c0dc7f54f0d 100644 --- a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png +++ b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9120c735d91c168075a6ed1081bd48ce7709b330b1bfc1201a37fa63f5666f54 -size 1174371 +oid sha256:f7048004af6eb53e24832e0bbe38410707f3c3aa73eb074f94aff4a1cf0eadb8 +size 2208581 diff --git a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png index 1ca750927670362a561c44551bb994eddd7905f5..b4e03d2bce00d5b57dc0ad8d77db91f5797691a9 100644 --- a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png +++ b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:830c46f26e290ab60e9681bc506b59a1becca63b3729a72ef6f48b1acd6aa5a5 -size 963665 +oid sha256:bf20beb17eae65fead49f764868e3b48a8f3c3bc365f8a2960288c0991ed67d4 +size 1787108 diff --git a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png index ec156d058aeb1517a6069c328c6fdec9afc1ca27..451ac502accdcb03df7fd40f308c9f3ee855e63a 100644 --- a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png +++ b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db8e7ef7024f401373b33e4c9fc73be995c0686ff98bbd38e6d3f060c28aca03 -size 1175419 +oid sha256:4a85d7dbd7df6d32a563efae09be237a8fda8421c1e7d0567079833d9290f90d +size 1802735 diff --git a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png index f9cee4af5d3ee6451e94a62eec33fc7dd9f764ff..4f8d1e8abfab5911d5a27508319bf9ed4c273b72 100644 --- a/images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png +++ b/images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7faff85d8caf2a8b158f29f9ba8452211e0ee530474d3adfec41e94f89e313f -size 1808705 +oid sha256:1680ee2a55ceec5f758821a3b557d4b776e7cdb932c4b57a28b217f6be082d0c +size 2480570 diff --git 
a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png index 4129cf2b48e406787bc486335652a15701be1e64..6fb081b742f885e0c3fa4064337d738db924aa2c 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6cb16440754b6fdd72b352e1323f3ec3afff94999fc6d554c315cab2597fc87 -size 776885 +oid sha256:84435073c6a1d873189c2e539ddd99032f3f9318ddb926f2379d54443a8f4d15 +size 340462 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png index bbe8b4ebf44d5c67dd4983c0e23355c15da65645..d4b43ca0e30c4259dc621ccb42d407dabe08e048 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f70cde3d701d72de1903badd70833f46cdc3734c78f31ccdd2861c346129c6f2 -size 614078 +oid sha256:fe7a465a9fa399f094cad920dc1c7564b0e1bd08b617eb883968eeb5253a06e0 +size 613606 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png index acc05462451f8eaadb802fa0df9d8132154e7c2d..1b346d2f700381bfa1b689fda176e26e949c1f1b 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0539c54f5f08158ef6dd121c2cc032b97307666bb8c66c9671119818a8e63174 -size 661957 +oid sha256:28a0a7146a63393a2ba6b73830bcd06c1fd668433d44b693738a4014f024d7cb +size 514985 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png index c75bb18c5fc909f5ac316a593b9f3566abcf1391..04a015db98a72eeef1f4058c8e27f27f0abd5982 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6ce7c4b7135af1bc70daf2a5faf5e69984ed45652b6bca6d7e46dcd8f9b57db -size 390704 +oid sha256:5518feb410cef098eab013c7b9818e5ab5c32bd84c7c9e097c1ef9b0cea3032c +size 391324 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png index ada59baae6517855a00aa9a27828d09d37bf3b77..9fddd247e831de87485ee6aee1af316d1843de59 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e07687bd92b535e6f93b070834832862d2af968d0386eba8f718fad3c95b16c -size 682249 +oid sha256:2d2db7abf6d0429587549716945143c30612f3a5164bec3317d87d338b001034 +size 605521 diff --git 
a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png index 4129cf2b48e406787bc486335652a15701be1e64..0c04f3cab2a238b14f0b5cb0df90422e5541d027 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6cb16440754b6fdd72b352e1323f3ec3afff94999fc6d554c315cab2597fc87 -size 776885 +oid sha256:2243b5044ef0881792f4f2d1e51d4d0e11e04de73f0fe90df7901a666d1205da +size 405190 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png index 485a3957871eda733a05c04cb0ddd37247668e44..ab384f7d895278e769f2b62f8e584aec5827a2ae 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d1ec328e77c56331ebfb957cc18deb4974067c969eb1e968efd925d9dccc199 -size 751190 +oid sha256:eadfc233004bbc189e1c1a976a9057390daf6a345f05c29faae4deee5846a521 +size 751138 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png index 927611af99f4c372de95a4dd968f1d40c8ad8301..49007fddf1da0e10b0efa7fcb58eff4fd72636ab 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8fe7594564da871e9357e749a42c091edc2ab4e67eb8b27e93e1dae62b992b7c -size 431739 +oid sha256:8597c0315336cc87c80add16b3460023c0424578ca968e81e996fe8034e061c6 +size 1320152 diff --git a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png index fa8c5d7128cf8dbb49d1431ab35d8c611ae13d19..68738667352e48cf446c8b6467b86dc63b4fd213 100644 --- a/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png +++ b/images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8030aeb2c475cdf3efac3688949c3e521f853fffcdb7cec7a0cbb224ccb809f -size 389152 +oid sha256:3637ba757eb95c8a99390c82ed60d1ab393e55c4961e1997ad13c77eb25036fc +size 457994 diff --git a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png index 1fd03153f1c3a0075aa2bbd244e2d227c32ef54e..1b7aaeda18b86dd68afb270dfb12be5aa30dfb30 100644 --- a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png +++ b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6bb091487698d8d25b3c6cd1ea55429ec98d91b2c22b0f0e0d2f64546426a1ee -size 2077135 +oid sha256:ba22c8890bbc01a19c0583b920667f51853c315756b3683b88d948a038562ea6 +size 1058997 diff --git 
a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png index ec1e77ce45e146be74e1d59b81e993f9447080d4..c6cd4be3181794371e78a3f6dca402b83c648352 100644 --- a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png +++ b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9667aeac9a6b168062debf3c7fafba4315de8d445cec3ed7eda6416701a0e1b -size 430718 +oid sha256:7b3ba55863d00ac34f57236e86eecc69d54ca924236f146368ac69e4ffbfdea3 +size 729061 diff --git a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png index cda071f919e916180bef078a2956edaeb738f9c6..8db72f9ba7c329581aceec204b5a3fa11a2e0bc5 100644 --- a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png +++ b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d6b85b5876f41ab0fc6ab871d5d9ce126f742f83b4f30bacc59e6421c1e519b -size 873481 +oid sha256:949117c0125cf831710ac2723abc16ea66280f949ee146b5a639d817ac2750e9 +size 772648 diff --git a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png index 5edd85037f6acb27a8cbe2498b9af00e389d23b5..1c9adbecc79a8ab1c2ed24f45cc43fdcf2d7ed9a 100644 --- a/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png +++ b/images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48f13952f4d49b2c0e22104a12c69ea318e0224ca0d01923075ae21edad0c482 -size 619037 +oid sha256:b26a0c503b0a62b236340e48ff6a3d24f179070c2f85814259636231b94ebd1f +size 830741 diff --git a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png index 27f711f1f5250283c9217f278352de12f5df8e14..89e7a37cd2b55b4595362116c48a3b6df05c2b2e 100644 --- a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png +++ b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6cf666d73f35a6f2eee061552db4b292415cf604e885bf2dc91d5a7baefa27a -size 605582 +oid sha256:5d7f891425a930ce4db91f617916e8ec3f7968993bb1520b5a6ae26436bf7103 +size 581463 diff --git a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png index 6de4104c7f8ad55bd578863edbd41bd5ff2d8fde..04a4e539b449a0e6299f7771ed749038db8c8f88 100644 --- a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png +++ b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:526a0ec617864f3d0339cdadf296b8d6eae3c6dc4a62adefd572ae73ff3ee40e -size 356387 +oid sha256:493b5769076ede0c373ce19d9bbf9c89a8c866c7b3414a8c8dcf2d801ccef110 +size 350976 diff --git 
a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png index 6a6dc3d2886d7b56f846158b9961411fa1a8ea34..4c5fc861ebb49805e0eb252af0e147af120b6f24 100644 --- a/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png +++ b/images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67af8314996903c766aa63ed53a91d2b7a994c767a4af8ea655554ccfabe8123 -size 966336 +oid sha256:02af87df64941e5990f97f4faca80cc5cefed4738286a29fb43c8321740b9244 +size 860489 diff --git a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png index b8f3130b65fe3eed77561c88b8d1c6ba5e8141a7..c3be48158b035fc97550aff053c4ef678fc30965 100644 --- a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png +++ b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93ca3d9d9fe6f478781aaf0db206ddc9828d88888ecd12aa3993b5f04156c734 -size 1004382 +oid sha256:b5416432ac124951fcdf1e780c65fe55c8a07fe75c8fe46d6e3c5fdb714fdf3e +size 326556 diff --git a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png index a8c301bec9cf7f13fe8b1413257861542f81cd87..994fc6046fdc125cf9c49a7b58e72162f5b0d727 100644 --- a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png +++ b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e93386dcea4ca6e4098ce63ef92d5dbb2311a1e6197de1ba920f35a10558fec0 -size 1419826 +oid sha256:ff6d9df574a434ca6a351bf88e50c62dcdf617cc4098aefdb03b0f3a933ff938 +size 1252957 diff --git a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png index 25d1ee99a22bd9c8a774d6ebd66bb84331b85e7f..e50d7fdfc03ce4aa2ee0e2ee3c69df32e4eaa387 100644 --- a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png +++ b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad839a2e0d28424883f957cf0748d69c9d7c688277a814eda7a04bfc45bb6f39 -size 1871650 +oid sha256:ea8ea6e811f4cd18066a9a13b77050df39082dfb9cdb3f312e136be684f86604 +size 2685564 diff --git a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png index d493e53a73c01644d1f10736d10b7f53b4c34ac8..49982b02df680251ef0dc5cf1e040fae9ef141b3 100644 --- a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png +++ b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b389f1d0aeffb90f3e3a4b11c90a44c30cf7fa7ee2bce082ecf4766ee1f1ede6 -size 1782268 +oid sha256:c0916045c67b09a3c043844656a4e57eb8aed0bdfb439e383aa5c043c8958584 +size 1769513 diff --git 
a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png index 3e5b1f73030b9878d45792fb787a1c7c970013b2..6cb130d988571a3a34105e9d400924828c030667 100644 --- a/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png +++ b/images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb494df00c670db4db1041371297052b809ce10f1d7483dbbcf7ca3ef8c39943 -size 1869712 +oid sha256:9e055f566a501f58ad41c1c503da7d8da04bc06d4ba2d343d187c52c380658ed +size 1955218 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png index f293056bbb5e068ea8d00c46d03ea64a2bd2db92..088ff0a007ece90cf8c6e77dfe64342095360135 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5d6ae3cfb9d55a61add93e62b02ba04f5c8ea0a92c1916b2e3e317b258db40a -size 946675 +oid sha256:7e3e6ef196f8cfb4a7bed15f49dabb89c9675fca6e3be52027144d28f124f227 +size 833108 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png index aa46aa0d6febea21526c82a7647a4045034833d4..b748a1d99f8652eb37760deea70d88456edded72 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51910d4a894d7ed51c3d57f7dfac733f5063a71d63d792c867701474316a4e0a -size 946752 +oid sha256:ba92ecaa1599e51f109c6bdfaae01e0c1780a889805560b48353affacaebd762 +size 764471 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png index 9908d69051fb91b897c7b2d33fc2a12ac79648d4..d23f0f30efd0b56b9d313e80756b96d1b86b39fe 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ae7cdc4ae7e16b08bb640b7826745ff9140e36e820ba6b23d6f34391735396c -size 936628 +oid sha256:6f5149e95d56a5791a1925503e7cc4d804952c0356d3da2414c2077cab5d2e9f +size 1042327 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png index 9508a73b9bc6cfe07642f1b7ba652231e485ec18..837b71ae0ccd8db29167019c1d481750d324d2fc 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:776a8498c8d655beac877bbcb8b4d4298fb89ca0465af409d85a1c0724775fbe -size 909572 +oid sha256:27c9a635f263cb2cf7e52b19300509d4d85ce4f7bc2ac24040bd4fab5401e6ee +size 883533 diff --git 
a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png index 52b51827fa36251fa6306084ca157dfddcf06bc9..c1f8ce94d088c888a276dbcb3216233895002c5d 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a8abc0255d404cd329003b3d2170e8675c3054a522a94f354503da85ec43297 -size 1054222 +oid sha256:66ae5199067cedc0f04760f2c2deae5f8d7128a538c279bc2fe3f2753ef29a21 +size 546043 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png index aee363abd5bbb228fc9b77052fae6f909ee6e279..142a873765a963b0e85359506c6c351ab9ce4072 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06539cc05618d9b6687204d500a33c71f8a47e0abcd004ed250695e31063a4bf -size 906614 +oid sha256:eb8e3cad91e44dc0be05577e7f6c21b01f85f6712455a64c1e1a2a75189f87d6 +size 981316 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png index b76c39ab3600ec241856917d60e1ac0e0af332a6..2806479f55dc95c0d2224c559ee82fc27bce7217 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:586adffa190de0c119e983ed86c803d65d607828dbbb7deebd80f420bfe7742f -size 945843 +oid sha256:81bb400c4903fe7ec8ae14ce19d46bdf354cec95a0a487fe7474775f02e44038 +size 1025798 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png index f1b913a195831feb0cd76c9a223c0e38099e46a0..c59f384bef574cc047b2914168dfa87cd57b417b 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edcddda29a1d3be3dc9dd0f8f2c7fccc17f68f4e05ea193de37bc6bd538c3e58 -size 593487 +oid sha256:d871e121db61536ddd997fccf7af52c8c598de736a5f29d1e6f37bd065bda401 +size 770738 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png index 46ad0839e9198b58371a984816d2636a7f4b2e90..eb201f3830a24dc2518bdb169a47d33af84f6fe8 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:288722aace927d6bab249e4e9b2a4d5f8573d231b6c63e2c301cc0f93a025593 -size 801400 +oid sha256:660588bafc5de6029dfb2e6da855bebc6756ed8091ba1510f8a0c998d8517c88 +size 866319 diff --git 
a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png index e3fcfbaf2e5eb2a77fbbaa80db2382e310913d41..d12c786bb140c8972dc1f2f45f178e2a9f5718df 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8e502a5b3109c2809390524283b4d8f0fe2c093a0f89b3b09b53cd347d8c4f1 -size 946607 +oid sha256:8ea1dd9728c9985fd5cbd37033491a8b385858c3a8e79e7576a99723d82d0875 +size 1026579 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png index 4ac60292fa133aa1eed2f3da07366905a58c24b6..13faec4fa729a125847058e35fe4d7a3e44db006 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b29099cfc52ae98ef7d7e71650de1e779e423d0405be0b1c57847684fccad8bd -size 944405 +oid sha256:fb6a048075c6ce0ad8f717851d9b938f01a6b41a38e2c6e8d3767e02d3cb1c33 +size 765694 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png index 61ca4e1a6e36f7ae80e3b320763f517c728e3eee..ff5561d0ad838dcf7ece164b4d0d584a019b9a4c 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11c6e80776d5beb2fcc81127461024f3f5423783bf0b568f0aab72b7729c52d1 -size 909791 +oid sha256:376738d84c8aafdd6cc086b4a74d6fe8ca1779dd97c9630e2c4bf18a812fa107 +size 1000035 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png index 4e9f1ed94ac8629b6aba7c8f07b8053a729d5b86..c36763d65c167abd27268d396f3dd9054f0cac9e 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a67b84c9ea15b87b1dd5b7c9c68424e638b4a4a0a6958527609d5ab2ea94cf6 -size 561165 +oid sha256:c577422d9ef032bc9c92b650d9f88d230a27d28da5b9cee722372946a28d7fd3 +size 453583 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png index 9e53e4fb4f91e34d294fd71023b6f4abd162e737..07c6511a7c2b795b52190a404f8bdb584f65dd31 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0382eddb25092848bfeb8729554f8d26a9f43e7f547011b5a27f7a1183b92991 -size 681827 +oid sha256:9703400def6392959040791eca7709f11431568a87d63714806143a16c350fcf +size 556776 diff --git 
a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png index 21fb8e9b0b03a56356702fe3257d82246b9aae40..a1c25c07967895b77e2089c4efa8a3febf7ba8ef 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92b5cb8301109b7e5b61e93a620e09d2ed6587f011a4dabc7d32d09034e03dea -size 986711 +oid sha256:fb668757cfda196240f745e5c93ce7741ca75c7016b21560999a16b0bd198c92 +size 1077026 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png index 1de799b79e4f3832feb76db534e641219e3b9cad..7b8c21cd55a785a49e0eb6b93fa7151031e5ff69 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:beaed6cdd10fa5b4d5cd3a74a4c3c3a796d6fc179c47b33b5f6351447dacb1ee -size 837822 +oid sha256:6b194c151c2c56eb8b5e43f18bee5c05626aefaad62d7e839c017b3240ef8a0a +size 1025747 diff --git a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png index 5778a2df140e84afbbc3b56c007ae55e1137d7c0..fd7bd0c6ff817fb21e17b2ac84c574edcb522ebc 100644 --- a/images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png +++ b/images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edcdef8f02f0c98fca4ff6ab88dc0d1a897b84a8bc306f9a038e176ca238f27e -size 893611 +oid sha256:d5cdf914386412ad49db59aacc4cb81abd7023f60a9993960e6211400c5da47a +size 987765 diff --git a/images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png index 6987a743931a3214bcccc7f5a34aa4beef753510..c8171280ce4bde06150d887d9b0d7dceaf61ae23 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d885257e3b9a378b0232d106a62257800e2604918f51350e20401e1b170bc20 -size 1255895 +oid sha256:8944dbee9d918030d752a1ce74e85b16b7e124d20b7726c335c2c783ab306787 +size 995988 diff --git a/images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png index b69c6661725af0326dc3e43b1c621c4141177d7f..9ee19fc220b2c119693f2423b608eb8951a84fbe 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69bcfd28fe57e393ef17f8974b706f3dea13c11f7960899060f8151966bb2527 -size 2038659 +oid sha256:9622718a75e32de5d462fe46885544afe1d74ed29cdeb3c98d4e5a516faf0dc6 +size 2381753 diff --git 
a/images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png index 23faf4c539ee474059d7d129a48d36730b9f65ca..c511867e3f9c0c306faa73727a363ea16f608ca8 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec343cd6386f6fa3263ca46c77e0374767bcac09f6ee8a76f78058806080ebd4 -size 1182103 +oid sha256:d04d280c426d4494fc19a3a202ada7e0e35850406c9c9f12e255f889642612d8 +size 1109995 diff --git a/images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png index f7f65d711c35ed8cd95bdb9aa6bde5d8d2974e06..8629cc030561b3c98f5dfb0122ca4cf4491151ad 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15c1525cd62aa6436304a5a576de6bf9e7befa7ce6a7060ad669a9c16bdae7c0 -size 2184623 +oid sha256:5247a26fd020fed90893afa34f020b912cdff2ed08fc35cd2192771405be5d77 +size 2102624 diff --git a/images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png index 0940205c4129d1e6ce118cacdf862bf835bad0f6..05444eecb3dd3c58fdd45577e525671db6af017c 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73ff3c64c75badc2f6057d85de1c951e7f6053bd7f345e3c56518e6b5d5fa376 -size 947342 +oid sha256:8d7c301cd1a72453048ba39c5e668aecb40e09a2fc487931c67ab72a7953d737 +size 837113 diff --git a/images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png b/images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png index ec457eb48a035b86aeeeb02b6fe6aae239dd06fe..d03d10be30fad3830d207c2c9a59bda62686af20 100644 --- a/images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png +++ b/images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9225a6bff235f24a4a95cb947f8955e3e2e999d184aadcb85e0cbe87536887c -size 1931057 +oid sha256:69ba98f1494a869f9f0aa3f64270eb888c2b6ab214182d012ec2c9de5a13556e +size 1931303 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png index a4a09deed9fba83daf5231820b243ff1bca97ba4..d596d5a69015da03f06ed4886d9067b7bcaa8293 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afcd5076fc4258c76c56313623de05383c899231deb7ea6b32518370070cfbbb -size 1431207 +oid sha256:9ee56257c2216207e79fd9e950e8842922289b4356356297f456ab56e5916353 +size 1897746 diff --git 
a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png index ba0228ef1bf89178dc8d259f27ae5885e3d21ace..5cb1ef67167cec7592b62b81ece5ac18de9e97e7 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b0f61563f5e61785ee58e71a8ee170c1d4edc9e4a5fe2b7da4022aaa50e159b -size 1906586 +oid sha256:c3be60f8d43b72eb47cd35faac16a51290799fae5a3f20a92a6702e0f2490539 +size 2323735 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png index a58a696d410f90f1f04f10d934b44031c60f161a..07932ecd06dff23f1dcf8c49721fb63a8b00cdbe 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93c211781816a10c1f0798d6ba4e73e6f358607bb4cda4ce961ca7995f3bf076 -size 1688894 +oid sha256:9487dc87aa29f7a90bdf86fab2438bf844d22c17e2e4f9107d591dadad7223eb +size 1122947 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png index 9e06f73672915acc69efbefdc25e38929da8c71f..8716dea048bfd3d4978ff771e937e480dee0e346 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:944f57a7c0fcc7bc92cce1fc6f8d6de7da7ec8a9f39ace48ec24d3b6d9a2336a -size 1285066 +oid sha256:e484cf28951aff7df17d53b5ab0f7fa830f8d0cbdcf9f0697f7e29a185071cbd +size 1667386 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png index f15e15f5d48996e5ba3c9303df32f50dc6170609..6e273bde4a54e5de34df78d3e471caec6ec00e6d 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84e05cf0dc2f088967c2ceac13fdfc425096fe9f7255862a3fd8f1b8f550c332 -size 1448810 +oid sha256:da32fdbf30d5a4e6b2b245e9a5461da94b99eedc585ccaff6923fbeae9960a17 +size 1609368 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png index 16ee88d69d44e503747f702b195083819084c35b..24c169bd49cf483ff1a1efcf13378bde94f164d0 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fb77688a8c50c95e2e7fc8fb467980f589ad39143378cafb609c035646c1947 -size 1968881 +oid sha256:e47428747178b90878df9eef5a47d3ff4e82ddf6a714db6021153137e0bcc92c +size 1598942 diff --git 
a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png index 45b6bd566ee1cb5f64d8ba2833f6a98037717554..5d3b737414024862b045787c5154e917b7e8b535 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61fdcc5afe7f8817eebe5e821e9f9b40f9527d015d120a8d159019f8a5114251 -size 1478444 +oid sha256:28b9d458546151a8bc9af1a4b687396d6f6905526ce381e0af9085c5b2e627ed +size 2044112 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png index deea1f32e84c55ff11c10674d2c7ba7b348396e7..7a4487d624f2f1862a714abd364cb18c7d5b5cff 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a2292ea525ca1f3dbeeab678423814e7569bf4c05cd0535971b21d8ae509935 -size 1434673 +oid sha256:032282a7f8f1614f7393a85c4e4607f771532b0f38edaf016559799ba5cbed0c +size 1995543 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png index b08bc530616626993ed064d1aa638f7f789dc3dd..710b8c3bd729da1cab286eefb4fb4034ee121611 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f3d1c1fc262a12188eb4600fccc86f0bc0385656ab929e5e80c262ac46cff0d -size 1439281 +oid sha256:e1b60d8646eb0e08c498b500d0f0ca0a126c9433ceb48d699bad0d1fe2a8eb03 +size 1942805 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png index 8f64c07602fbbe943ea712f01e6970af81319f2d..3c5da9249a007600452665adecc9a74f557fd546 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:709f2afe23cf2eb4532a67d89ae2e0de805f51da24ab2832e16da0409827eea7 -size 1387390 +oid sha256:549825be8193bdfb8da3138bcb688c5fbbdda7abfbaf4f76ab0b4c02dffe5d7f +size 1168093 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png index ab413fd79b518239d9e743e5b287920527b38399..e712f8f7c97fdda5273b4057e0d12ee33b3ae600 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b271bbbd5cba00c15ba78b0dc09cd7d1480fa4e90a6e69b9cedc2e6c743c5c2f -size 1444194 +oid sha256:e4697ec3f2b26a79052649bc3a5bc6db03ef8cf27f46d8e13aa771c0263145c3 +size 1022706 diff --git 
a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png index 50cec9c5bb4619c9226cc8b332841283e9f7f19a..813c0839db22767bdf03d63655a0a0292d765efd 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a899f2454157666e3152a53cd201de8a53edbe769ffd8b779ec77c8e9a62284 -size 1483866 +oid sha256:25029f57ba57088d7bf05822ac0eb879c03fa7e6ea855c7c275706bf4dba59ac +size 1922952 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png index 76858b56d1b92dc609c4681de2e17516de24dc90..5424678fefee95fbe1e645365e1818d1c2556727 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d6c8267437fa90ddd8007a5fe46f1851ca2f1defb115f69a455913eaa36ab42 -size 1367111 +oid sha256:59820ab25dead2d3a05c61b210471807e521b8421c627c448da12b0d9e9be3ea +size 1983879 diff --git a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png index 0f7a2cf8101b481773634fffecc20205f9656327..894f946e24c885a05ae1785c206f75217bb0f145 100644 --- a/images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png +++ b/images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a98b2ad32a2505ceca3cbb24cde6349cd332930f4651fc97d978d679e6a18b49 -size 1436032 +oid sha256:b5b9ecaa8362606e3f1a4ae011452b7c6dcf16f247075414811c3285f9dc1ebf +size 2006995 diff --git a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png index c398d91050fa8ed432179b7b9be81b213643cd73..d9352e8873577fa04593d093b975f8429fd88fa0 100644 --- a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png +++ b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adb1f9f81fdd2c98a2c37acc7ce629120ffda610044072f9d1a3e0085a591745 -size 692774 +oid sha256:5b8f7cddc445a33bd02d5091ed98ee56077379a797552c5dbef6003d3ac6a37a +size 1200180 diff --git a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png index 46472de076d2dc3c53673ae3acb4d769ca496bbb..3c0e6410a7c78acf04ffcf0dd95f0c4f27376d1d 100644 --- a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png +++ b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0feab484003a0a2dfe7ad446c875cb0062874747ba6ad79733d70eaabf6e78c0 -size 249742 +oid sha256:581c3f2434d5c2f8e6887884afe701646a2aa1f77fad9d604ed04687417a33aa +size 260885 diff --git 
a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png index 6643c8f646e4adf263c0c35b6c4afd407d769b7d..a82432ceab76ab0fc0d1474d2fc9d85559bc4fbe 100644 --- a/images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png +++ b/images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:816696e99bca19a85e8ccf9048734347c1a0b949cc1d089b7a9dd0826fbb8ca6 -size 737414 +oid sha256:0accdf51629de9e2bbcc679c3f0a4c5f341f4817be23fc1dfcfe768cca8d34b8 +size 593068 diff --git a/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png b/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png index efa751219a9a8c8eb039b0ecb32dfbc0be01b9bb..5e7b176ba1060eae7cf569081d8c09394ec92113 100644 --- a/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png +++ b/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8fbd58ec22cffb9abd5b52073f8752eec2b2e786e25b9f3a446140c798fb573 -size 2040973 +oid sha256:a3b34ae39290db9c989d6e0d99aaa891c0b4087700bc8ab5afef12ff752857fb +size 2472429 diff --git a/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png b/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png index 5bd8cf8351c9ecd1a44ffa2c7420bb17a20eea7b..ccfbd5ecfe4c1ad3a520722b9963d10499d531a1 100644 --- a/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png +++ b/images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1af488b847dd79065045e103794945c4e612287a11d036058ff3b8e94d5f57da -size 2596587 +oid sha256:2a82ccd5fccde5444870abb3fd66a96139e3546afd36ec4646b32e3fc726d1f9 +size 2046739 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png index 802bf5306f58362f6b0b2703de272c778f87c5f3..1463de06788168fbb668adf45290b45efc72570b 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:697d32f2ef28aa55302e73524b641d0d72f0eee03d64b2d49da8d404a9b15ee9 -size 775060 +oid sha256:89204088ea7e5a91fcfaa4d543e99a277da079c1d3e99e6f4870b6267354b495 +size 760531 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png index 95db3ba095f70d9709d0260a93becc1382b2e92b..ed2cdd01fa89f3f2246cd75e6f9c0f333b80ace8 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb6c5cf405aa51375942f04705aae41f319b9ece9e4cbc3c708e1109e91c1aa3 -size 568804 +oid sha256:593772b94d00f858f6496fe125d3e6e5b5a55f8aafd760f3d7671963a86b8572 +size 578484 diff --git 
a/images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png index 51dc6cbafc077dac6e086f06bfc1b413e0a4daa1..db0a72e036a48bd7392933aa160397dee23f2368 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f9bcacca22dd9dc283c72abcbae8951973d7a081fec88d04b0a698f47da18b4 -size 557296 +oid sha256:9255e196016a6830d2bf061bec30e1da0234ec077f93791c9335e01fbf622bbe +size 648147 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png index 18917a8c0a31f34d29af7d49c3a674d4a759484b..c38d6ee6edfa7e17af0566e7b3435522db4cc60b 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d402bf16bd0686af492743ae7e1b0245432e4ef2c2244d27beaa3273674e4c6 -size 568825 +oid sha256:6bb5093a093ee546157f2bbbb79f24fc3258a2163b4e86c8391c349b6ed8a7b8 +size 578378 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png index ea311ee69caa7b0cdbd9269f6a724781abb07be9..c5f6be500079b2e61107f75493f15345b6440b2a 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32092478f1930b5d911e701d892f003648ae649bd0c251a94eb8304ea3c5b123 -size 577987 +oid sha256:d48fa7972a663ee825ff9c7db01fee88c62d7358f8840bb6689ca454cb9fdfb3 +size 385262 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png index 4099db87e0bc88787b458e7de2fc09bb98eebf3b..3b02297234d71e24b6acdcbbe75703b9995b3028 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7bd542aca8cff074ca88d7e55749f547a21caceee0ece1916f08279cb57233ef -size 804291 +oid sha256:cba50cc5fbb9e66ba0262520c2a5d9e0cd5b3224ed0ef87124a51409d590b516 +size 887359 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png index 1acee67cda109639a66bc4c3943bacab8fb1981d..c8cb8a77434b0041225573c5250459ab7fa602e6 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15fab53ade07c47f451fae836c7b6ba9be0038158f6bd1f1a70e9ab3bf6b30f6 -size 1280479 +oid sha256:ace4b25ba71d65ee60724f97a33d37a52fb7b39f7396b47bcf8e244a8595e209 +size 671202 diff --git 
a/images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png index 21823d93e0b7e3b0aec089a5611fc23ff27c21f0..fb7546c8066437eb4ad3e951c6b836678df51597 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:740498a9daa25c3b3fb09da5069f27b617cf719c3aab08fd15904df15163f737 -size 534469 +oid sha256:8bbe898af70f49bb0e6ef8e7d1752612e0952e9f0fd28b74f0dda2ec777dcf68 +size 418770 diff --git a/images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png b/images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png index 67d238247a58d3d57d3e2e8294907bc39970ac8f..65801e631b6d37d50780df70e24d58836ed62382 100644 --- a/images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png +++ b/images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b90bf1c31f7a8eb82af88d4d01b0a9190764c79a8a72ee73618a7d4d3b7a728a -size 785644 +oid sha256:7719cc8241e775331efb2f7a022bf9849df18401629b05c645b3ede86d7301f5 +size 938616 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png index 504df786738bdcb465a53bf665a6a95f0a5404ad..53008b937f02f30474a65a902306a924f1f9a317 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1475dd6b87cda05d69a8d09276e7ba5bb40f4729460b8fa936d854ffd8d64473 -size 378887 +oid sha256:da38ca70acc1724b32433ecd145dd28fe1d5ff97aee7c0927c40afb4569a6be2 +size 663796 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png index 4a7975cf22eb1084d57a359497573648e2c2611c..8b93ca9131d28b909fe366d568123ccfcdd8946a 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cbd24cda5576a1b9eea97ba199258725c9a369e12ce870576054a30c102d452d -size 594810 +oid sha256:15a244ce822ab78691dc4f50bcdfc21008fc859e453f6da9c24b82757fa3f211 +size 787779 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png index 9c866ac5ce2cdc6ed6f238f4f512da4f16fdb71e..3b63433a61754bd6dd41a2ae5c832acadcca3eb4 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04428f41683c751cfb4c5baf614961614e39b77060f39ff7e3e780523c23e616 -size 382303 +oid sha256:22182c84f6018aa1861289e4898c8730b8c3d372d9e313c8c629d3d8cff16488 +size 627953 diff --git 
a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png index d0a2099e031f879d4ed707519bb3f9b153c4d843..cb7abb513885ee0ba92f071caae9ed3076a889fa 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65e11eb2da2067355886ab60de28c6e8ac6c05c75eec27d446fad122a7b99774 -size 1026678 +oid sha256:3974c434d36459ac0abdac31e3c7806a244ccb1a306f2042b5ee953a31f9f372 +size 1175991 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png index fabd7d56a06b1fbca0e2f3bf65173af938509b87..8b3e7950c8eb58ea537515d33360cbe4180cfbb5 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb34248b3bc2e534e9eb2c540bd1674f88f4d7c9ec3249a693c015d57c2cd920 -size 695436 +oid sha256:ca3bca854e5affdb732dc14a904f447763f463fb37110053cda15bbea993dbe1 +size 1155701 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png index 275454113ebd60e27be939ab944e2f7ad0a77602..c819cacecf63af587bfba93edb5529c8a291a9e8 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a28408f240580f04857e0bb297444b6256e6b8963ddbfd72af395312698a3324 -size 555243 +oid sha256:fa91f0adcddba481b956fcab46423298f6d38585ddf0206e03bbc81e1601bf09 +size 864372 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png index 7f4c98696c80d8a3e9c621dca50fadc9c5a1acc9..4b9f4270dc799b88010dc619097319f8342458d1 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:745490917ad46e4547f1283ce59cccf0463ca91d7b7b84863926f3c190db45e6 -size 511549 +oid sha256:59c2fa1b02c06080da9e8fb5885c3cad0d27301283264bb63842eb08e3085467 +size 786357 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png index 5d48fd2b7f2fd8b604eb2e3355493bea703efb37..7fad2c9a3c2498cc17d8d7622f108509f1a10833 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6580e38bfb96a05f4391ffc73c282410e497f39a2badd9ea439c47a456a530a -size 25291 +oid sha256:e83eb80977ab56436325f1d736e0c4fc17d8ca279a39fb854f644efc7125eb08 +size 25032 diff --git 
a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png index 99b5de1a71c2cd480a5f69f81a51a63010715c14..cffae351307ff5e7f7c0b27171f0e5c7cdef5f73 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a16074b9ec77afa75830f35c82753a8ea1eeea8f8ce22e752d8277f1d578b806 -size 723706 +oid sha256:2ee0801513dd14190fc48392f371bea4d29c1bfe434d2ac6eb1e2217a0b0d107 +size 731126 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png index d0a2099e031f879d4ed707519bb3f9b153c4d843..b786de6af7a18c750bd1f219b02bbdca782369d3 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65e11eb2da2067355886ab60de28c6e8ac6c05c75eec27d446fad122a7b99774 -size 1026678 +oid sha256:faa13dcd9510ffdfc3af511bac37d544d7b267323e199dc70613044db62e6a8f +size 1114181 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png index 6198f2d72cd8b7cf2be92481936dfc3483152cde..1b9d6f2b6b97fd882cb0cd03027c0653f277ddde 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:313817366c5384e85cec28ad037b28cdb9023bcd20c63c42bbf85d8a5456808b -size 533125 +oid sha256:74b419c88c73cd5812faf92e75a1d7a2340dbca0bc03a8707d4f747f321efafb +size 692355 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png index a4dcc26d4309c4b7e7c37bfdf07dc193d9f35cb1..d1a2c074d56a440dd3d19bdae542b5135c3dbae7 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ce8a06a49af33691885c94560bea50487f58c870586d84ae4c982a4333b2543 -size 381336 +oid sha256:aefcc1a75d37ca2808f61af2467c3c59887d6069b4123fada0b3a3e4d00c2a84 +size 434284 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png index 12ad407706f525b4d8746f98d926aa462a8a2ea5..305db85df4bcb4f22fab4f61866cfc6837c8e202 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c9d10c150a8d40a5545b4749756e472ac32d822d03b8cdd789b5cf0bc10f334 -size 734469 +oid sha256:d43c3440cc8bf899fbe055c782815433adb896726150629aa8a4566ae653d794 +size 905529 diff --git 
a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png index 5d7ee7bb4ad9112eee1d6600cf389c5cb02f722d..ce2dda4a0844bdf8260ac345942286869013a147 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:954d4e20b63160db27ad095beb212f0754b6b5a253fd46c5e27bd1d907e7bd17 -size 458242 +oid sha256:d00ddf1019890d7af0847ab6037702fb409f1314521776c069a884d99539006d +size 524676 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png index 49a1e7ea0c8308a37ba24e8b9352121e42cefac8..d7e1022b52385a020a2f6b1e047f98de62ea1e04 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa5cf31d6985ea4892e3c7b5b668eaaacda48d8797cdf1b7f06ce794e1e9c45c -size 447343 +oid sha256:b072c38edf5a9d9e7d439db1a3ad0bb69fb1b2de71b7e8a05053c2c6ab40bb0f +size 453972 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png index 10e908720704a07a2ce5b89bb683eeb2d61a55de..e035d0e40c3196dce32ba1e3220b7ea2e42a6a77 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01ca8e1c0c76d05cf48f221bd45be2bc0e8e2bfc9af44c24c15f9a73e01b71de -size 529341 +oid sha256:9ed0a409b8750cbe87173867028b887b881860fc34b55a77cd8065103720cf24 +size 1081772 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png index b4bde05425a3778950760620f68a32030fbd3ba4..a02d65d51b0a68fa31dfe1cac1c7b7408a557d5f 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a4d99d9d9846ae206102876d6c40084f07fb1a5e488fa529b21c78780f955d4 -size 369185 +oid sha256:54ec33a3627510ea25d51bbfd29c791f6c93cb315f05658a1916aa93fa413496 +size 752975 diff --git a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png index f55aaefeb021d2d61778aa2948574a1314e790f9..7520a9d23079677553f78a980ede54a32a88a415 100644 --- a/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png +++ b/images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72be330694018536f2ed4cd0fcde4d9b193f9d396c8dd348f80b8f1471af3458 -size 533181 +oid sha256:1d00760086e550cfe1528e886121c40d0dc72b768057c4536d166e111d606798 +size 953975 diff --git 
a/images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png index c04d9b7b095d503e525b098bf597410c88203305..3ea0db0007b8a0273f0c9340d72c5596aefef3fb 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9128aa85662ed87698b060d0eaa6dabb07687009f3fc797410ee6d4078fdc8e8 -size 2944316 +oid sha256:aff572f8372f38c5932d0758cb105fb033f527dd6345e70fe2548fa441bb93e9 +size 2040303 diff --git a/images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png index 4e78037d88bcc34f2e2a0f3146944c742dfded37..f9c5cda152d46178d2ef9d53bc3b4a64f83c9cb6 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cd86c7ec30c1232e7263ac98faf74efe9b841ac117904afbacb0216d74aec1e3 -size 2643046 +oid sha256:b002321c7099778d13466bb0d37b761efd92baddde94b7fc8783467951f09a39 +size 1982886 diff --git a/images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png index cadf6a40ae6530d1213369d20d8ea29360451d48..a4c17425517a301d3cc25e1360f02c5ac52d5537 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9111544c1f2ba570cc29f3e04e4558ff0209a424cc0787ad93686d768b2781e6 -size 1725568 +oid sha256:c3f79659a394d1af69c318c76e70870034b50003e1aff7bfde7afcea4edf924e +size 1733335 diff --git a/images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png index 79516d8fba4c4d037c6a8c2ddea922ccacddf530..bcf942d3ae8e0aa902f2d241cb29f0e5afbe5e67 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9acbf20b4275031f149c9e0aaaa7e2050fdd3c35fba4c0edf1a5e36fd6dc840b -size 2113637 +oid sha256:1050c180513229acfe1c49265c02a1152c1663329bf7a13f37cf209ec6a77184 +size 1722527 diff --git a/images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png index 3bb47bab610dcb7525097f89722061876b247fe3..4b06bcc1816a46ba772f616cc8d7c59831ec4099 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31b5d36de55264724456a4940bf38a6eaf1cd5d10180810b858c514c75ba6d26 -size 3270167 +oid sha256:2fbfbfe463f165db5c2dcee9856be9666ff7fb73675aeab1473a05ab288e953d +size 3959305 diff --git 
a/images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png b/images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png index 709c5134cb37698943896b65eccdca027de79ee2..378a13ee595e7eca9b743b4daf05ab01c9be62bc 100644 --- a/images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png +++ b/images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3a3759931ba2bfd1d5177eca7fcf56cbf776edfe64976a0a708d44210fda98b -size 2417285 +oid sha256:066377752026a33618e595070fc8bc98f25446352470f0669337e5bc9d49112a +size 1778560 diff --git a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png index c88379352cccabbcdbd2d2862dde618da50450ff..3b72c7d6243074e1c9b645a02ef128bda448a5b8 100644 --- a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png +++ b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3da874c5313437197c8185ad89e4a945d56a1e25cc9f00ff508c0ac5224ea836 -size 493564 +oid sha256:43af3c36b35f29bdcbe6b42dd83e4474d88686af007649086e4c6a375b3d6df0 +size 642905 diff --git a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png index 94df8a461e516db6b9d077b2193c5aa4c236f974..976123e968f6fe401f17dcd63982c1b7ac757d2a 100644 --- a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png +++ b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fc0ea84e3e574133e5de55696dcd2937d2cd8b28eeddb3ded6c0ae563a27241 -size 466153 +oid sha256:2deb373cfd5ef91ba4a414fc85a6725f511307510f9a17742a49c34f9ac6e76a +size 1044243 diff --git a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png index 4831304133a71e57a95e4f592e52226ebc564212..a9ff5f9b20ce1167f14fa8427fec504732777fe3 100644 --- a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png +++ b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9582499a8346689abbcb8382b843233a57ca11cc279c7f2659f08ce523c4393f -size 662258 +oid sha256:4665fd3c6d9f13df3254d2b0b235d1771152f6e67e4ab09a18b143ca423b01bb +size 826877 diff --git a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png index 910fc62eade88e044ca176897b9ece34d291fcde..60b41ae343ba6ac5e0888df55f518dff0e7058d8 100644 --- a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png +++ b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56150ae06a1a1f7c215358689928e51d2754440ee5e6fcb5e4f237be26645716 -size 660064 +oid sha256:eace15dc13e6a1d73585f13924c783df04fab1d7047a61df528cf74da4076959 +size 537450 diff --git 
a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png index 86d76f563e49ff25851a102bda81dc89ddf4ee5f..dd805983e532aecdfeae421edaf12e7650468a28 100644 --- a/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png +++ b/images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:880bfc78d61cf26b595eb9317c5ebe9774d8fa6d32ed8e2f2414bf74e7b95b41 -size 466192 +oid sha256:29a7312364d91aa0620a26fbedb0c9879937c722a6bf2e09fc43bad693aa1f57 +size 1076274 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png index 4fc6a99c7ff3cea21037894215c4c2d89a7adec6..85e57c6ab5813811d7c616628319027796b9c766 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d6825f0db77371f7748c45b80522f667244aaa2f8ffa953eca85130615f449f -size 1787532 +oid sha256:f5e718eeecf5a88509ae7b323372231707cb0886d9e586d824f6acef98bf9c3c +size 2285830 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png index a62ff39872a74fe2a8dc9e8c73730b7e80c62d88..351c8a885374c869f09135eba0ecc168aefb1ef4 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b5b067527ccc09a5c6d8000a64af5871f61eef6ab1df953f4cad853dfa770a22 -size 1795813 +oid sha256:1d72729ede75b453ce8443624a89bab818d27da91b05ac9464fd07606f7d4e33 +size 2289579 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png index d1d4755390ded95eec4694db889a965610acf82d..79eabac76a07bb04754d7bef93dcc57804bf3acc 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35ba5b0dead163c46ddacec30a0f302e15f7d1e53f881810872c70a8fa4e0fc1 -size 1643144 +oid sha256:82b2c6222c8d21727d2bfc1eba874feee760356de15c429e14a555cc5482453f +size 2082339 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png index 211895b635c5fac852ddf6271e3ee8ff95ff5f8e..d9b4198732b6ef795423406db469c5810af4dcb2 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11a5b8fb77b221ac854cf34953db319a81581a3092af80451f89107125ec0e47 -size 1755270 +oid sha256:d324543f66c4d84d0ac4ec3308119c3251e50e6dd09653c2abdc42cddfc5e24c +size 1249850 diff --git 
a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png index ff97b1e78be2cdeea393b320b45c5e98b7cf2e04..34e2aee9710fb671bdcc0b4bb1066c4ef564c518 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:daa50e70a2597e94a59fbe0288962187eb7013f16426ce1910f4de9d020d7566 -size 1802829 +oid sha256:3151758790e208f3c8908e37934edf40bd485f49a6fc7f934231f21585c0d016 +size 2256859 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png index 6a41fd5e60bd086338efb291d9417a5cbacd3f8a..df4a538fcee1b1bedd75b7ecc72cbe5abee1f00d 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4edaaaac89e1f231380ac9070fd5cc7118d69ecdc03ebde22959c48e590f5d68 -size 3206868 +oid sha256:71ac2e74b5b6bf93787a84516b606be832aecdccf2674f69c24d7ece98f571c8 +size 2931381 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png index 024df4affa13a89424596c2dee67d674d4a5d33d..b959e8329a32acd42c18246b95700c444d57f7c4 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:09d641640168e23ea6de12c75fd66bc00e8cd9dcef2adc6eb493a53adf77d320 -size 1842940 +oid sha256:ca84f8d67525df1de784e162254770a9760695b4ef828fadf6f34cad43cfcbce +size 2360767 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png index 465355831b34904c1587313bf0e532b24a3930af..d46f3864d4883ca542d38d2115cb15b0e4d83bb7 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d60eb20433d95bcf530d1c5a962319f4f69b76d1d9b0989b41767a518f3f8cbc -size 1785465 +oid sha256:012a4c228b203299299ada8e0c2a69600b5b84a243d4e2e5b37cc7b0932b4603 +size 1293360 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png index fa58b7a262a83d0ed0828dd50155a95fdc90f7a3..384e1586bf0a3b6a0f914b06374ce47a87650c6d 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dce09a7a020371e5970743255a705b376530479e84d8df46e9387e8257b9df19 -size 1650622 +oid sha256:a5f35d83aef49eab9a658f1247db4827269d91e95347b533d2b95eb735f642b9 +size 1803981 diff --git 
a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png index 881360ad1824db66c555a8b097dbc0de000ef7cc..5430447b0053ce9c4e183df72347ab0a8d52d34e 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:986d72b8b3526641cb6fe18f67c28f52321d4806ff78b6d66eb6c54684604090 -size 1709995 +oid sha256:8ab2f3dc0fc2e2ef22a56610073de07e40aa6c511c4833a65e6fc5d79dfada80 +size 1113800 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png index 4bb535b163fab5b3b0bbbbcd5de4462f5a56bd8b..4ea6ad279f799d54fb35e8bc6c9e7b6fe49400ce 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8ff115229065af2932c8418047dc6e4e4d4341d4cd3564ef06d6826626f6323b -size 2225189 +oid sha256:63eac2eaefe635b363a638982bc69431f43a4a4e8dca38dd5d0af6fa096f3f3d +size 1653402 diff --git a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png index a6cf599705ef89613e6571228d79c2f20215123f..f1e0f4799debbc3fa97d24ff04f904a21c3ac617 100644 --- a/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png +++ b/images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7927c56b50ba124ad5d5b79072a7498a916e33f91be3c079ea15f9a6dc27bf13 -size 2272575 +oid sha256:71648ae4b4c34aa05efee7d28a04111d234427c4a4f386884f081449ce4e9901 +size 2253520 diff --git a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png index 989b14cb6c08b4e15180618ae23c1ab3cffc3cf3..53c363f2afa89fb81f90c1759985d9ee98852748 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67b78119cdd8dbea67027a911b2f3fa2b6edb3592053dac8c064f42e45931265 -size 1950420 +oid sha256:35bab17723362a6da201819c0efa186a1cb3e0fc09158cba585ec2cc0e67b36d +size 1347139 diff --git a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png index c4a9817a2e2fdddad12a5b573ad4be45c4f3783d..04e0f10b6616c7eef40282cb09fdac16d484d439 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79bf3fbc0367ad8a45a2590119d7925a0d622fa5ed67c2fa4ff9e3bcaf6c4e74 -size 1630049 +oid sha256:71dca0f903a6661cb446cbbf09d664ae90e2ddf04f3c8bdc58cbe06d2970e2ff +size 2001613 diff --git 
a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png index a24a63908087fe3e30fbe13c1e46361170449de2..0d10006e2bfbdeb24e634d3bbf8a25919067a158 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbbadf0e494de130801d3d84f67d84ac746f9552555903deb1f518d68420301f -size 2782238 +oid sha256:d057a5ed0eb6d7c1e2255ec4d3e07aadb2e1b9d15cab9e94d5d0e5770d651b93 +size 1671277 diff --git a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png index 1a3db7d0f084f34179e2033eaf3e3dbebc469da2..fcc9b83434c7382b9e21bf9b08344bd0239cffb3 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4715691ae102847c28f4c5fa11ea6f5189bdfd9d502d011af9a6ddbe491d43a6 -size 1952974 +oid sha256:b66ea838eb8bd9590bd2f349a034e2adffaadac468f595cc34b72561f6a68743 +size 2330544 diff --git a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png index 856e347481513c3ce1da6ddee8ec983394cd42b5..6a9e8fe103576bbbe38408c201122125dcacb1b7 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b78dfc4a929d1783bbaaf0f7e1f8345e79eeaab17a56007dd97a1bef2803ea82 -size 1466848 +oid sha256:94912fc2f5842019ec3cc0c11df04f812cd8c6c3eb1565e12d7eeea4384d1e54 +size 1801924 diff --git a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png index 82051c6c5c9e11ef3809f7244ff0f5c9aa2e72ca..2864185a54cafe6c76becef569837ac35699bd49 100644 --- a/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png +++ b/images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:feb10343676604dcac750462e65c5204a3185149dd4bc365eb38f981671791ae -size 1946304 +oid sha256:dcab6d45005ccca3a07a3850cb36cd10345081c7aa84cc3c821e377408824eca +size 2241002 diff --git a/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png b/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png index f8f4c8b440200103d618e906e62379da77925aca..7d094cda983725c28b3ee630eef9e9a26a0a99be 100644 --- a/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png +++ b/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b85a2f93fb4ba408a7f7e8c843f3e055fc0f43c8aa49d5a9a09d39925f59f7c9 -size 962028 +oid sha256:defd7189526f85d53f8672180014ebc07f4b192cf85863aaea11829814b4b7ed +size 890396 diff --git 
a/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png b/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png index c84a90386398cf2e107ea45ecfba310715097da4..24677a17cb2edba607f88be626f2dd6f16079531 100644 --- a/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png +++ b/images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a01cb1ae7eae9ae7c833c4e6d88c8860a54439ae644f6daa8a4c6e62f0a935f -size 1790824 +oid sha256:05e07b86ef17eef98884c980392f36723a56e97c1cc466abf17b3ce35f385020 +size 1501690 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png index 13c92b305c7f4d23588c4adb6745a30b6c49e845..d41b51ea98e418dbd9a39eb10cde30f86703c376 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:256e2e633a3116f0c27ce60b6a22da702981d3eee3e5ec0553ddb2dfd20c6e71 -size 602875 +oid sha256:b8e439644e7e252f7ca184aa9744f2ff1444337fb3e098ca8ab4890fc2bcb855 +size 592234 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png index 9f5a6a9112df5a6afef26e4e8b3857bddbe12ca3..8e46fdab966e5b1cf653923c4f2b27e452129217 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4804a1c38fa6f22c55e539e0fa5b3d7b7151f83ca1579c8d81dbf2c6370c778e -size 592194 +oid sha256:e4101b28738bbad9254c9d534d2189918e6c915b2311a992b26f97bfcf45cc3e +size 524116 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png index 0b5063bf6206b86ba3b604683c3ef000ed6c85d5..6c78e0e099a8beb24f2d5ed62ea1bd5d3724cd95 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d4d23fba913ec0d84d0745367ad98b895b4fd02b6817646774decb9f533fe21 -size 677318 +oid sha256:a26173e780a5f2b1186bc8a28d30f6d3e944cad94f92223327c06defcd584f4b +size 509025 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png index 0caf62a4c12221dba19c46a08afb2e9e3b1c887d..10e1ff322ba4c0028b39571d2603fd8ab40a7901 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8dd0817e5804214ee86ae8e2b1f635f693c35d6ec7e8d28b74f439b3f41215bb -size 857980 +oid sha256:25769493a835bdffff377eb768e6a3deecdceb0bd2036bc12b6bfcc5156669fb +size 1226528 diff --git 
a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png index bccfda8a4dda5f26d90bae6bb6ba0a4c73d2c9f4..0cf2662811abaf8d65356da6793a6f454068f9ee 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37e7dea0088d8e510a487f10339ada2a5ec6456addbd870ca1237354563c1313 -size 789989 +oid sha256:9d2b1f520bffddff00fae1bc7827b3bd90494db5b518e21e695aa429fb2dfd65 +size 543883 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png index 7008ead9533d9a1d7163d71d25ebada0a1ee92a4..6e7aeafa1db93e55de6fea5aed11b0dce6159944 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93923ff3479f88e968c3643f4f532a3d55adc2de616d247526a5911942d0fcd5 -size 809691 +oid sha256:b46b436024436ba74fac5203b07ff4f29ad5fa147d170864eaf49592e8b1b976 +size 565369 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png index c8ba73544fe2e5d22d05e8111d68158dce01c970..93303fbe0d8dbe71e261743cba551534e0a85933 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37e5b61db5afcb94d969c16f31d307928c9cef761260f030baa43e80a514bba9 -size 674349 +oid sha256:c049c3e202f1692d95bce5b347964c6ecc247d630ed203e794aeb7cba010e131 +size 898244 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png index 04685c0346b49a71832322b0cb3057bbb862b130..63ea6b067058eb1e58eb332be21098b6f26b4a24 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1617604d773009a8abe718372b2910c98203d3e587c6eb164eb014321be0a7d -size 927063 +oid sha256:59c6f9f23164488ba6a9d1163fcd5daf77ce8d60eb0d68845c5b72664cb9c7c0 +size 557631 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png index d1cf3c13ccbd925dff148982113d04f5887b4890..4a36c6f69b3405b37e13fcb19328bba21802aabb 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a1e3a7d5e856905840926449967e8d49299434ae02609e0b638fbc14d66c509 -size 482771 +oid sha256:847fcecb64f8a9bdb6c9cddaea5304538b38b7f69fc9fb2a410a78940059c18a +size 485833 diff --git 
a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png index 35a4030ff25e35575a71ea1f230b9222b69f46c1..795b6225829bcff2e575beb3568911af41cafe8f 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:edc4d124ac5e16522d5ebee3777b838722276f7d9f90add840d8ef75c0d48439 -size 343946 +oid sha256:a319cd4cbf11547b41a5be0560b45c4090ded7ede78415dde28373ed9cfad851 +size 765706 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png index 07456a7ab08905d593b598b87ce6b6c143802473..1ef63436a213c066eab0891037d19ceec7733e67 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2c901c6a4c2f10087eb1d96a7bd8ddfa55066a452c8f3d9140813fb5ad74036 -size 591081 +oid sha256:c6aa195ca7cf026d5c9d61e4cfa75f97fdb897e99a5591dbce4c404aa633a832 +size 644326 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png index d0a82ff3577c2507fe77d4b46815860ecc47ccb5..6908db7104a03ff500c1c6c942fa81c9df7a97ee 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef8887f04258f89ad359fced6d584b383350e667cc919156bfe699b92cc06c4b -size 1052207 +oid sha256:56293228f8cfcff98c8d52b1514aa6dce767e39aa5e4b83e6d0eca90799fac8a +size 1482720 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png index 149f1a4b803a3d108a13ff0c0af97a0c6ea05ae9..1ef895a49b9c3c4bb877f519e07a631519f7b3f1 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a82c3dd9af8ae3956f958ca3b66e7109bae5bbf6525ce61466c600bf2d0b6b48 -size 490898 +oid sha256:bb748249cb4031394e8199c9db51e10b08598516b2263516af613f1150c4d8cc +size 394611 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png index b2d089eac44635748d10e0ed9dc6560aeea38a99..9f8451e8b5b2dd6d075856fc5af4b5ad8017f132 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40c8ff631a452e1c0d2b59d7660960710cb0f35e885ca8c45be501c807890fcb -size 2037597 +oid sha256:8e4ca70e4a31e30be54f93bc61b3f72a05b7a91f4beb1a4c99e9427483d190ac +size 1634923 diff --git 
a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png index 121091d941c1d274b43da82ff35ab6b184b0e28d..3b5e68cb4fb3e81ede3546c0ad75f4f94f37ae75 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90b1910c61dbdbf5454a9d1b82b951007f8f1385f4c89c75d691e5cf1f6b224e -size 592194 +oid sha256:928d09538a1c70a5b457323f80e41206562bd509c5060106b9957ea448e541dc +size 629298 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png index a87fe5ea033eee2f13758efffc3ece37aff46d91..97e37c755cbd3409b5473bb8bf4126bc1ec6087d 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e08be23658473fc6c22936eb49f8217d971307639e797fc7c4fb6aaa5fe27985 -size 590758 +oid sha256:bd73b5b67b6bc5ac753d6633d0b6d67fb977f8d1154fdd3ad7768623b0feb96b +size 621711 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png index c08b7477207fbf96afed5e51bee8252f444bbfc1..9ef817d8008968937dcafde60b24462e17dd6a04 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b8246f8e1cca19d4f4eee6a89e0026d8ac768fee8bdda54eadeb2c25eb6993d -size 773423 +oid sha256:d701d17d65fed919e98e62eddc151a975c07d56bca630ae320139fba65965d50 +size 539879 diff --git a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png index 446a0e3adf41ccc8003af6942d16187dfe446a85..0b61fc544eb8c6e466e8c5cc3424f447149898f9 100644 --- a/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png +++ b/images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:94623d0ad2f2b74259dfe6ea18946a3cd346510ab7da23676a91df996ac8e267 -size 1168723 +oid sha256:c5471987491e7f7777928adcf498b18d77518592539e6e50d52f0e941ef21261 +size 784927 diff --git a/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png b/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png index 88fdcfd36991afa12acc0182c1a861f45268dd51..3f802d83d32caa68972dc9f64695a6c0cec57eab 100644 --- a/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png +++ b/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb8465b6e9e7c2e3ab81a104f33e64a58373ed7bf07264f75d471b2a277b5f3c -size 1176723 +oid sha256:00170a80a4274b6abee1e7e4d9f705a47221942ef5d3877236b8979610b8a9f3 +size 999518 diff --git 
a/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png b/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png index 5b77f46e9974337acb85dd52e6d8961f66012a3a..96b4c7f2220f3f3aa7d66bc74b5c905a95ee1e56 100644 --- a/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png +++ b/images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68d3c02521cb1d752d8405d4d63d4d83cd0b994e77dc44fbfb0de68ffb111f6c -size 1411733 +oid sha256:9ce8f7e584930be07e3f4fc2d1671ebdb3baa3a587b3e84329cfca8223299acc +size 1072000 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png b/images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png index 500ed3c6e9c28e06a2d6a25dee638edcc535e82d..8c80d0129a776b8faf8a17c924cea16c9a548a4d 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4e7c0b78494e2e9f3bac3ef61a469e93be60ce05a5839e77aa990d97600d4c3 -size 1486655 +oid sha256:b8098dd05e0b63872a99c250978ffc35c9e67a6807cdac0a726aab82076c2e65 +size 1782495 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png b/images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png index 68958e56dd1e5b52b007511342d6ed1251017f2f..334fad2bd80287e6ec7942657b8ab30deb113daa 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:162cdc4cfab7f2a38041fbe7d760ec0c9650ccfda3ccdb9a05a4e851358570e2 -size 869075 +oid sha256:b66fbc3987e46c20c3b4f4981c5a6ea6a24b42869e45d4347e76a4a529add4fe +size 1181416 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png b/images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png index fbc758cbab1421f6a7f5280d07ea9aa57f57f66f..711763ea8d926b06ef115ae6d96d6c90a8dac47d 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a8a8dd6737807e5367280f71a00763102a080212630b4c9c769193ab0e7dfc3 -size 1450520 +oid sha256:02e1d31ed8a9761db668d26cf6ec991f6f34092f3c9cf9659eb719c0b5974bd8 +size 1004588 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png b/images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png index b6fcea9930c138294881bf5c406d5f278d87d30d..c20e4d462ae428bb25ffc3e51dfbc889eee6176e 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:455aa1b5286f718ccc3d87d19433852f1da4eaef4d9d3b13dadb3b01be3cdf34 -size 1451578 +oid sha256:a9f87466be3bd5e92f9707072007a11e7a2e9ba1e9ad24c9defe40290869fc0e +size 1531490 diff --git 
a/images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png b/images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png index 1ddae0cf34cfc7d0f01d04224ef479cd5c3f47d7..1e60d1126ea3542d2d94397cd2c1e5ee5c063461 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:145b7fd37fc6b716446e06c03efc03e96baede19ac1dc2e60187154315ee0121 -size 1435536 +oid sha256:6c57c3fba4322b06f9778974deef0158fc88c8c3cb631ae228b31fedf60e2ff1 +size 1622144 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png b/images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png index 0ba9f6e94c6325453a26b2711897a9fe80f78fa5..db762e44c611457128c6c9789545c1d445aaf403 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6fb550eacaef6a904ca09fc859604fb4b55e84408380b17f3a0ef66ad1079913 -size 1003589 +oid sha256:441bac51799f36136d154ff59528b3b26bb8ca034322867ac012f727012708bd +size 1335805 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png b/images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png index faa870d04eddf693911ee3dd761bfc0086c65a27..1e962a94d773f90d700440c253446398e65d2470 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cc187dc531c9ac66b19e790a8418eab062198c38fe41dbb316fb4523f000a06 -size 1454703 +oid sha256:bad33ceed93ed7dc69412ab82da8588779fdbe7c51e60350291132f85609b513 +size 1747819 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png b/images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png index 0f17e89fc7d521dee9b9650aeff3dd9ba591b808..d1e014d5c73d31a630685f0af62ab5cb67f49c14 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a85ccbe5b9de0c1699bfbd2a13ab274b8ce47a527b3bd247ef985c475c314eb -size 1435344 +oid sha256:5a41f9e0eadaea99fa2cd341307cfbd0890622bb0e98adfffe350a71288dfbe8 +size 1717515 diff --git a/images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png b/images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png index 1f87647a42e1901bd2b0bc02c6e9d97fc377c32f..94cc9d4040486657f8f8d0a88a1729bf0fdc285a 100644 --- a/images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png +++ b/images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4804128d55f73fb3ae151cdfb8d2faffa1fa5b26d43827ee5e1d22dbe84f5ac -size 1434720 +oid sha256:061e68136aa8039787ad23c42675b848831ca4133c317aac92fc7d6680462b3c +size 1579264 diff --git 
a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png index 4443d35baf12c9837f05eca803b3e06498f6259d..dcb2c844304d6d84de2fc8ecb31c2dac2f98420d 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:681295b663f0d370bf0636c6e309e043dcbe8bacdfe88617fbd689f6bfa963c4 -size 1421835 +oid sha256:533da5452f0c8d13184e313076c564c7b82c0d7f9af119ae71793e04de0a33db +size 1465312 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png index 4d3936c6af667461a1770a1cf9bb32219f686df1..605a96e9b833869551550db8d3b2171e9fc3966a 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:53cf52295e71bb3f965d8b231b26a5891bece50aab445af8b00f9f2f1b75d9fa -size 1428197 +oid sha256:689d2377eee042034354e34b0894963ec9f1bd44936a8e7ce7fb5664513357bc +size 1374424 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png index f9a75fc88a0c587105d23f1f21db94964d4e6055..65ec532829f6f66a0cb98890239d862ce3cdaf0b 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f132a4ff66d91053583732ed02c95542f6f9abb2b3ca770d1e967c668f2d22c -size 1420996 +oid sha256:9aac4a96686e6f9df58cd6468b0e1aad4b4ee13b4fbb1c6248c21054aca6638a +size 934141 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png index 28b9470ccb2f3136c5d79b18ed21ddf0abf2a03f..4f7103fea21d0f3bed88cfa5427b403fddef40c9 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca60f0e89a8f46d1dfd5d9192fa63337b00e184bfcc9a4900f10f3a2133ce1e0 -size 1326394 +oid sha256:6a547ca2530dbc5de003697984585d02506cb04908ec81cb6e8f406a009a7a09 +size 1375793 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png index 18e18cde92ea4735529d40a34b263f55633daf22..288cb78a5d92877709b9e3e34ae5ca9ff98806a3 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:699e583782893be8ea6c90d9aabad5be58582b6593f5e2dfe9b94d010ddfe95d -size 575189 +oid sha256:ca611fd63c4fbc0937ffee4970697d0223161187c517ab50f9e0b660cb1e7b26 +size 435316 diff --git 
a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png index 4b4abd8a54049a088efe25413c5ba702e0165e3d..07e258b4cecd7e3ce8d2825dc01ec24484f80802 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c2da7ef77e4bd04d5164a451cf6276aad5abcbd87eb7e7d112052424f6dbe9c -size 1421555 +oid sha256:f027294a639218c32ec3549bc3d86071d428e18a11dc0cb9a5c4a472b5f1af19 +size 1467650 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png index 95667fe824c490187c29ec5e9759922a9972324e..8f5541479ff141bc7bc0993b37dd2df177877d8d 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08e5282db62344798c5636bd8bad0b71970160cde408a2aba10b674e72ee3f69 -size 1364437 +oid sha256:889c88c0c33145135fb6ba118ed72d885e8e5404a9cb48832d45eb93ea1e7e73 +size 1504380 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png index bf2cc1103d4f19a22e1cc9ed72b5c81964f7c912..13cdab264e4fff1c10bc2e621e89978f72c586a0 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aeefb5a07d62f4962bec0f8215e1f32726c07fb04e2182bfb4696b8cd1bceab2 -size 1427326 +oid sha256:cb86eb901573efa18fa896ec5605558e2e85e1854971a41af2f9086fbc35bd9a +size 1475187 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png index 94f30c02331b655b5728a9131d39e71dbb22904f..fc3213ff2b9f969bd2f305bbb618130106e9063e 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2777ac43469bbcfeb950255cacf647da9649ff240f6da59de79884df98f894b -size 1422539 +oid sha256:234c402cf7e3eba3450822d229fa919de597cecb5dd7e0aea340718a4778bb40 +size 1422354 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png index a2d9a99d75f138bbd680f278a16251f2543bdc60..6df49510011a6ef7ba7851907316a258b0e8ccc3 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46716eb34fc0466869ebb9cf11dff979ddec509b97a37e8a2dbc1295259b8270 -size 1421526 +oid sha256:9ef34d9b0d27ee540172a6bb867c6a9ca399479e53ecb4fbe8a1c940ba2cc3fc +size 1422289 diff --git 
a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png index 59c9672216f197d4ab192d2685d215befb5a917f..078b1b6b40bd995d3e7ef8495a115719d9e017ef 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d2fbb735c6237699bddba7aa4d3c3bbd4c05ebe03f33d33cf255ee4465a7450 -size 1424604 +oid sha256:aea94de5e082b1e4c6f802b768a6d19576d6d22acb9b8dcf2dfe5abb8105b5ce +size 1423670 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png index 5d6abe72e1abb4028f382b0a6dc4e48e40ba9199..421f4b050bd8c48032ca5ac610f632268921d046 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38b63790795beadc98f807a9dd76ae056471971ed1f6d0a330ae1d9cb459c872 -size 1422233 +oid sha256:d9b19ec6f1a6bdb569eb97b681349f30217adefa5734ec62fdf56a67f1de01f0 +size 1424177 diff --git a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png index c4d399f0ae07ac0584d1334c9f3e4a5a65d57581..11360c8acaaa438527c540c5b257b718658702f0 100644 --- a/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png +++ b/images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccddf4bd4a6659f939f2b8a7f64953e0fc1b135c5dc7f50dabdad2bc8b1eeafc -size 1411849 +oid sha256:415f938e621f60f1213b4864f4ee191c2d2049ff1acfdf0e61c4114d732e0fc0 +size 1400990 diff --git a/images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png b/images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png index 7ed73f7e361a0e204947c6b385253ed97766afea..0c24eb15e86054511ea8e0a0bd65504fb45715ad 100644 --- a/images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png +++ b/images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65220043c297a03a8d331f564cd60f7eb5a5cce9d091045856b6f663d3210659 -size 2065268 +oid sha256:f5ee2f0b1f855ff8a0593effa63fd84cf6f15a8b045390ab599f16ad71f59c3e +size 2216177 diff --git a/images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png b/images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png index 4a13b9fd36b95aa135b31981c6a306efc2a72068..b445aec3b5564bf70aff490b0525b6a55f11c79f 100644 --- a/images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png +++ b/images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:610ec1139a895ce1e69f36cac1dc78a317a150a679e9970efa480018584aef46 -size 1564833 +oid sha256:7a5d240cb77056e66da5d219503e1bc6e2859ac48d134e9ac16b207c0e8d2fc7 +size 777472 diff --git 
a/images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png index 63ccc0c0bc5c395f10bbe10c578f2015eaa557be..c621a2084a50247405edacf787f171182b46c24f 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be614bcfb02f566d3cb37e58f8d19f2a0396f51ff82cd50ca995df975140581e -size 720410 +oid sha256:8ecdb31ddbfbc022aa625ed5b3b8874c7daae7dfea704a17954e58e905fa532b +size 1354262 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png index bc3f98787aeb4013a745f7efa0bc2f550b985429..5b4ce865241cf1d7bf48d8decb372d60ad377afe 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:872506316c9ea34a463711bd895c32f262977867f7e12295c308cac8ef78aa0e -size 1692563 +oid sha256:2432267d1bb322916eb34ca7b5f37af9335ab5f4e31254e66cc77c948deede05 +size 1456892 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png index fa1807e0b027ede5175d72370e1912432981e32a..e60a3a721c0b1c1503233cdc84f9be7c4178ee89 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e4f3d790a7597667842a772cd37eaa22f62a989acbf1c3aaa749bd721e2102c -size 287374 +oid sha256:16344f455507f41f864af43b2c7be38d684dd4fa725b9f6cb24545e185a8bb5a +size 336776 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png index 51ebf1e24f6d3ec3ed844e53613c105dc9f495e5..40c44c51cfeab4a9eaf452073de959d2115b2567 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:352f432d94c6b05fb9362960960ae6a7829bfe26791ee309539d1b186ea989de -size 336440 +oid sha256:6e3dbd9e4608c06b1b4cee6d52a1733b3d8f05a95c0bfb8d7b101f73e948c202 +size 373669 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png index e2c7b291f84ee29e4d5585c89a0a834a7bc505e0..7a2988f5687bd537b5ad7384677d5bb95cb4fbae 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1571a1f4fd3d306c96ac824e793a963906ec60eff1057ec02b8fdff386541a6 -size 681511 +oid sha256:09080e8657a76e81de177d732ff2c0e80fa026470d30fd1b82f331eb1aa267aa +size 458641 diff --git 
a/images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png index 57fd82c44319c1486f4df93e3cbc06c7d6d49e19..099904a24eff3195fb7e5717982efced9c2d155a 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e75e2a4bffbc584ffa7376b2aee3c048adaf6cd19ded40fcd532f092a747031 -size 1039801 +oid sha256:1bde829abc4f8afc297e576e3ef3e1d542d615e1f9371cf4f0ac3ddfc1469bd6 +size 294035 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png index c0f7e0f3e5d024722d53662c089990b970016e23..5c29d2ab414aac40408afa375605a0b084845b16 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57fcf9617910cadf708d8f4147facf68e4979e99a3d2dc752a900d23216437d0 -size 1106737 +oid sha256:662a1a1e329fd57282ee185081f4202f76bbe28d6c78de823a0e8b3a1a452e39 +size 689925 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png index dcc58086250ec8a5b960eb130973fbd54ed83c70..f608707cbd99558fb2fec856d64db2a6c976ea4b 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a519f2b0c9912c8a6d950ce64b5e9b8709d7bdd0e8540ab169819f8c5299c45 -size 299115 +oid sha256:63cf324debccd375bceab4908c037c13a67ac1f1ee3e37e7c556f9006b49c9cf +size 321874 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png index 90b5b512fcb400a41786e5835cb352e03e7da722..4ddb110d889a106af2be6c0051943d5c57b568e0 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b9d4d6d7d5aacf33113be515466fa623e7ed4baf778605281e8a57accdd43eb -size 1068155 +oid sha256:b42e8bd2d7a07e34e282115cc67c13ee939968629e4d1875aadbe42ec315c8f3 +size 1037622 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png index 2be56236ad899a997c73d62d5103637ef7ff79c9..c70b7c0b0d8cba4e00712db16c765959e83528ed 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0df091c0f89b143b38514d56c9c3986a7b19ad525c33954b7d4d4a9c18a36ab0 -size 287824 +oid sha256:189051090461ad399162d69d9f308b820a65834e01df96b0d4c8f1911e173cee +size 338037 diff --git 
a/images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png index e359c2a3cd0883f528cc345adffd19e50e895940..55fd244456b12fdc86862eb5330c9594d2363b4f 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c323696266a0b6155607fba05252066cf216e5abcf30f1b766361d03ec7d5803 -size 303821 +oid sha256:0af4bbde6d64da7f9334b86c503add784a3fb511b84cb79020c3a54648e35607 +size 354107 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png index 4fe3697b5bb9a6ff3c1e12bab11a478b5a0b917d..48aec223f1d8fb687ded2c91e9ec508834a4030a 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0aba9ca36e851310dafb04438d36aab5df389687a7fd53d79a65f37e8a896721 -size 289506 +oid sha256:36c9159da5bfb994d729da14297e77d7c8d626b5dbd935a6f25640189e15b472 +size 338846 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png index 33bfbba1c2924e98da660ccc76c097cf4de2bb38..7a197cf590e2c5415eb7820cf865c3e3cf0da002 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b348cd362e1c430f0ae36475dc369feefda3628c9f3c4d6b98f6432122439eff -size 286392 +oid sha256:4ed83f07d4e1fff9ce14ea7c17fc8825c2d53502af322229e5fce5dffeda8cdb +size 336430 diff --git a/images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png b/images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png index abdd34f60c0fdca7ca9700677aa880455eced3e8..69020b0eae00c8783ba6d96ee001afe2a9f3fba4 100644 --- a/images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png +++ b/images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f22da9b4930eec010d575bce286eadcac0c8a9c90935d9bc0f0f432724d93e9d -size 346250 +oid sha256:936f25b1b347a19fd9bbdbfc2c5e2f705d9d34b232a70d466de7e39393dbe2af +size 396445 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png index 3ef95d63c04f984bd7af49ecfc04d57112271e67..0f893d9db191b20daf60c7e4bcc43fc5ed68b676 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b3f315e07a96e8be9efdbb4d5f67c664b79879de91eb782b495a11707b2b66e -size 1155068 +oid sha256:696dad3b791ab0bf07114f730b404c55bdaaf4d746f0f980e594955f7997a0bd +size 952919 diff --git 
a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png index e9a908e326e85bf38f8410deb628cece917b34cc..70eb13ccc776d93b2e28fb56e76e4674b214e4f1 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2ae3839bed7a7848598da371ae481bb3ba3a250d92db49fa6671cb82c82c67e -size 1551732 +oid sha256:c62f5316eaaaf3a16522495ad99045d8dec2d64144a9c9dfa8a5f0eb87b92b2b +size 1489098 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png index 8af099874326489d6e3cd111b18a66b01ad3148c..7585d1d1697ea836f3e1ccbabd0c94da32eb62a2 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b4a3417db0a5299dd4629838aa3274c54a9894dd99c7fdcbe949167649a02a1 -size 1443508 +oid sha256:c4abbe8303745cf547d432af9d6f7b75f3128387061838a76e3ddfe2fd871b35 +size 1884824 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png index d1ebd72e7a0c2802a5850c1c3f31ab1d049829d1..9f5ab1bb118c374c901619f75a79d3c01c1a7f7f 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ffe0bd67af49e55fe52fa318483836d92f9193cb7b631302c118d260afc0f0b -size 3890046 +oid sha256:2fa9dc590972d3c8706c0425290a41589c711559ded185c654d42aebdcdcf217 +size 2335155 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png index 707e6d6a5daed2f99e0f7610fb6080436c3a57e1..5c5a1a9df13d61ddbc1818c116c63905e5b0261c 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:848d73bc32b8d278af332d8b9bacdd575abec829608d7e7065e15f27d6b2cf08 -size 3864100 +oid sha256:297d27057fb2965894b4ce3d2a5100f6681995a5e9ad7e74737bc66783ce74bf +size 1168586 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png index caa9cef37c38c6d4090433a510be34de135db4fd..e53aabe851e96c96bf94ca0d7936267383833b9d 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61ec392eee105148db8a33c73712720b9c7f4ec89e8518d46c9c54d73315d32c -size 1739078 +oid sha256:4fe0ea8936ebc7f1c65a91f0b6c333c661321c85a786bfcf3f815235282a6a18 +size 1154031 diff --git 
a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png index 7f6df229c03b2b772e0888822176ee4393a46765..34d4a2b38433004892982211d78c904069fc4d72 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cae643a49ce4585c8c598c62cfb7d3891f5d9f84631f17245f37459f6d99f9d -size 1127382 +oid sha256:d10616f4a5d8b7ba384eb67c6d513b0521bf58a862192849ef215559cd13f923 +size 985467 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png index 31599a0f44efc8b462edf57645d9cbcefdb730e3..9fed83058c9e39497ef66801a51cc230ca0055bf 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:927d9439bfacf50bd7fee06b3d529d69dbe6377fd64040ee7fc8f0165095f484 -size 1601909 +oid sha256:7ea6ca32cdb2ab755440ceeaa303e1df6141fd27de12519c1d025c6f1314d031 +size 1289616 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png index 14ab0cf05d81dce45b30c018a7d9d44da43e8daf..aabfe5c29c3f7756ba1255d255eda94e11219e82 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:559a03ce8aacde6baed1b7b9ef86f0e9bf86d16f423fd0bc36c85404cab13f11 -size 1835079 +oid sha256:f195af721de650afd862411c3ef61558ae9cefa503fed0a4e31823f4f1c13713 +size 1313901 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png index aa49333f061e15c583135e04ca266120abfa504c..e16ffc4388ab182c09373d00e88481a5808b1c8d 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59dc077669de502bab50c36ad44477cff56e977efea56538a250281bbf27b219 -size 1879582 +oid sha256:48e6d413d47d53f85db19912aec13f32eb041ea1cb069731dd505442e6651ae1 +size 1085201 diff --git a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png index 4bff67b356a64b358237ff5ed14027599c310877..c4062c8556497d7ce537d39cee61474b1b1f05af 100644 --- a/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png +++ b/images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac31dc3c047912fbc9da0fd26575c9a6f94c5d73d5ad74001e587b17135668af -size 2188522 +oid sha256:633490eef46d419f0c3fa405c11f6b314dac67903d3b8bd3fd4b5c85ea72a959 +size 1232764 diff --git 
a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png index 01d3ddbcb520cbbface9f3ab17ce2cc8e86566d2..6c9e6b3f5ce8a3b5ae17ad377ed4e780feb0c1bc 100644 --- a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png +++ b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef2eec37fa1791433405b1b974af4d773162afa2dce1c8275ed20c70a8253f19 -size 1182770 +oid sha256:eb8ca2016628a2e48e39236a4d6c77def186e2a196c590ef657d754a0525a423 +size 1260173 diff --git a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png index cb684a4fb9980d776436f6ac88cfb8d5a75b34be..1b2a31bd1d0da4e9757c9fde06e68c7cad8207f9 100644 --- a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png +++ b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0538fb60c64535ac022eb6901b59272b713fc7f1a16b0676364878a20a054377 -size 1725749 +oid sha256:b4abc59b243a5158ac70e775d70b1512c36bf512431ef38e0301751007af0dc2 +size 1899607 diff --git a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png index e735710209c0379369ca3b28a7874cf6b0a37a5b..6fa92273e9e37f6951a2555d966bf0d9ed54fbec 100644 --- a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png +++ b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f78b77f724ed7ac2789834edcf150bea0116c96a043449c71c312154284e8efc -size 1220141 +oid sha256:133ce7215433bfe969692a002ab2eaf9cf298111e9df343cfd997ba34d5d5c64 +size 1251267 diff --git a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png index 9fe24f4baa24bbff739f261e23d9bceec5f5ec3a..5013b60c8e28045c31cf199ca33945a6c8231dbb 100644 --- a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png +++ b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b6c74238bf7d8d866bee10289152766f51159dd0b530352d1fe97ee96d6d36e -size 1741444 +oid sha256:f2f20f36bba71d84ec097b2182f62d3e76b2a088184900552aa7d89ec174a0da +size 1826062 diff --git a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png index 64dda980375347745b7129b558ced6d41cf9e972..94fa1c0d003f256696b10f6711d69966818a13ad 100644 --- a/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png +++ b/images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:823327860e21604de0f4f4581f1afb06afa87328e0ef5d6b2aa19e9a51354dd0 -size 1480816 +oid sha256:a17cc2be2035835e200ead3e9fe485bcd97da8cfb4a0a4afb606374e352588b8 +size 1180666 diff --git 
a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png index c198ac2704aae9d35b649afbb2dba67df73dce2d..ae4018227b7464954b2aa4bbad224230a58cc202 100644 --- a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png +++ b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0ebf354e8fe3db7bd955490ee5ee33fbbde73bb4a9eaa75e7d2cf9185e7ca2a -size 401426 +oid sha256:b5aa00d4d50e614fd4577f8269f149ec0510a3ef6963251be8a5e37a66dde5a6 +size 764444 diff --git a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png index 47669a3d855c60841b9b285386b90abef5bc8874..15c004a17998729578b451823ba572165d01774f 100644 --- a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png +++ b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf3f0bf8074b738a404fab8d3e07e4154677c3e4cf2d4d96f107bd750633bbe4 -size 518773 +oid sha256:dd4e09bb5539639491b1de29a931832caa7003f65c28b5bd9275bb1df7b13b48 +size 1339938 diff --git a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png index 9fa98721b37375580b6ab940054c03b0c14c5456..76d8ecb99044245289b127c4087b0be6e8eb9714 100644 --- a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png +++ b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:513f02647e389d9c7a13a97af1dfd08fbd2eecbdb99fc68de61f9dc290c8bef9 -size 652181 +oid sha256:7290387ab1c52ec24cfe83d25a988dc655a06b08e91ccafc7dfac1ecb8a2735e +size 957242 diff --git a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png index c198ac2704aae9d35b649afbb2dba67df73dce2d..ae8d110d7bb2f5078b1205c6847779b4f41d46aa 100644 --- a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png +++ b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0ebf354e8fe3db7bd955490ee5ee33fbbde73bb4a9eaa75e7d2cf9185e7ca2a -size 401426 +oid sha256:b0030518c4a17d98840136428f68f3f4b6eba8def0edf17b6315cdb8e54b41a9 +size 755520 diff --git a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png index 6d8082d7349f60a7a343feef57d99b774f0be331..bbbc995ca96e19d23a64fdb7228de0b9b6116b7c 100644 --- a/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png +++ b/images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:046743db808175f16251b36f39b4b0ecacc48378f1379bc444c2f3cca7b5e5f6 -size 438163 +oid sha256:7f285c767f6f63119663cec5b841099af02ca4280f31cda2deeb4f0751700a0c +size 508410 diff --git 
a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png index 767d5e1b6882c69a1c156805cbfe2ea3b976fd16..23552109a844ace3cd75e46f3b7a1c114982e62a 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e84070c777fa09d94050b853f05b8f109c9b8bd056e625cfe0b0fc892eb97ce -size 1195338 +oid sha256:9deb3c2a3a7db14548979c6c50624f6684ef129995cd7547659cc25ebe1f837d +size 1194314 diff --git a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png index 63c472dd887665d04c741a07c45853c850eaf306..46532bfaf840147b8bd65ef366001eb7545708f9 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ac1e6abc604174f47977dd0f903e7101397c27c53402e4cae7f5eba2bdacd91 -size 906530 +oid sha256:dba109b26a88e0647a99fda614347f5f41a9b109ffc53c3607300fac027dfd39 +size 1055266 diff --git a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png index 2cd4f3b29d9f1b86f6968edf02b5aa3c810c2f74..d495903c4a0431a0e69ec64fc4ded921b4f51c0b 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f25f59aaf79d8682abca5761c58ad056c17bddcdb9964de504c2ae8d2889ff1c -size 929245 +oid sha256:910ccec26c8e2aeaa9f3302530be10e4d969561b532fc457511b4061c9013e95 +size 782187 diff --git a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png index fb688c9444ac751e682f06f145a6d0deb6f7e319..95f47b4140f06d6efd11ee358018122508107976 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fea70a59c1cfc03d4521c7adc1e473225e85d2f4ca88d2a960b9e08202c243c -size 934429 +oid sha256:df660809050253704717008169b01a6935a839d58f019e660fac0921e348e910 +size 957279 diff --git a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png index 8404f0750652a9d878669d92c23f2422dfe489b5..87470cf46c5c9f9ec398732059c304bda11f7466 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a38b210f98cdbf07c50f6e76af89e1955bbda3b37b676d708b083f50e3a2f76 -size 1287883 +oid sha256:29aa1c47fa7e7a8a2478571588c1b28f6c396bd0bab014004ca9d61c1d7bdbc1 +size 1555564 diff --git 
a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png index 61d8a04abfb51782708f58e78525fb60a9fdee61..7c775b8a02c44fd46a3b8828b867a850fa9c09c7 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35963a7f627663125cff45c99fdb429dd4b5a6b7d221251f98400a0f9838d5d0 -size 1163442 +oid sha256:c115b5936e6665daee0d3cd2ad733d1a4e44c27da15fa4a22bea36d906915c53 +size 940585 diff --git a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png index e5b8173fc290a63ae202f37a6ec9ad3bc38d51c5..e1c764992831a3441784fd63bdaa501bc14fd8a2 100644 --- a/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png +++ b/images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a50e457c125695cfe24479e957f46fcb57012b4060d3664bcd54e6d488ebdef -size 1361544 +oid sha256:686ea71339cd132851d20434e9014b8e08868937642899f2513eb01fb5b808ab +size 1221401 diff --git a/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png b/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png index c143cf7b85c1911db398b7d37ade8ea0485ce4c4..1b7fa85e896f054dc52502e20d89d8c98ce7efeb 100644 --- a/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png +++ b/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f8aadb31c38e85a889f1344e536a5ff28bbccc03d0e887776156cf1338e3006 -size 597345 +oid sha256:db372ebdf0446ca2723a1506a585f76c1668ff74c26b2048691ef882f77d753d +size 563514 diff --git a/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png b/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png index 2ef0b563ebe35d63e24a105c9867d77a0ad8a78c..e7ceb3d20f65b03694facd5021e088e74bc92d41 100644 --- a/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png +++ b/images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:75d61dcd6b3a76973ccefa6bbe673f47af618ed4b7e8fca38ca8d5fa68fda746 -size 130270 +oid sha256:8007df7570aa2c70e410444bdb0d7f9ecccffd7c852db6ea5c0c6a08dff60f14 +size 158079 diff --git a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png index f178e43d8366350807a79557404dbc9a1717b291..8a7e0ecec6357e00dd41aad59790fdb38dd929c4 100644 --- a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png +++ b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4954c4edab9b63bdf3d626b5570e9816bf583c65370b3a00f9775e70fb590698 -size 1181461 +oid sha256:021c23754ccfb5f8014ebe8078a3166b99a5533ed786c442da4b0caf285238c8 +size 920264 diff --git 
a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png index 7c04e8bcc4b8d1f1514dc3874d7a0bfd032cf480..4adfe8cb7521b15a253707e62df37e5c96d719d1 100644 --- a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png +++ b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d55e0f27848742bdc3427975d01a3bd7d285af2fe99f8a541693cde78fad991 -size 907078 +oid sha256:f7990e3cd290b4c8fa91786aa0ad3b99663ee13238dcfd5c591d68841147cda5 +size 666327 diff --git a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png index b67ba6b6b2acbe56f8150b7729cf0fa80b158c73..9b0fe22c01dd3992b2f6979e3e6a555bd594201a 100644 --- a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png +++ b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f665c09f9a4260150812b744ac34634d637a1c5933c489924a97fa0907a15b97 -size 2725617 +oid sha256:450f5c66003de8b7ab999f436639390f5a0b55a2525ec6ac4788a68ac0de9aa5 +size 1756653 diff --git a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png index 1f4ad2a814785361f99af4c27669bd9380685c5d..bb317849b9c15fd58e7d660061e8b6279de2c3af 100644 --- a/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png +++ b/images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afaaadd77a1b4d5f388e7fb58031d55f467a0c74704bc1075c597e24e18225d5 -size 882419 +oid sha256:fa4b7378628eb8e5e387c4073438fcd15d734d7a7b29363ace27b64cdb911c48 +size 977638 diff --git a/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png b/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png index 517e48f9d8f4a5f24053ce4e654a1b9a0e8b5e3e..77556fa924742c805215873e036e10825fe7ab26 100644 --- a/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png +++ b/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83f777d58325281566c845224edb54fe0493fef2fc385ba8a749fd0ae9453cea -size 1860842 +oid sha256:5ba78f835c50b325f5b535046f5438ffe9d24fcaca6d04f0b7f2d63b0bb9c933 +size 602397 diff --git a/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png b/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png index 39f208e6cfaf1ba95ea6eee558bcaace35737e67..369afd60a36b6d55dfc775d08708092a099f50ee 100644 --- a/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png +++ b/images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a7e70d6c08a7ac3abfcd043fa0bc3460cedb50e6ce129b2865e7af18845ea23e -size 2368466 +oid sha256:7b62d8b965855f3345dcfc101458ac496f4329da161a2a762ae1ffe80a2dca9c +size 1607651 diff --git 
a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png index 8ec6999d5fb8948cd159a507da02e681b8420f9f..bcf71969c38e298365ef2c0661b9500a94dec085 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4cbfcdd41b39ee022bc2e2126625b46d213af390bd1cf13174ee20777760a973 -size 169302 +oid sha256:5d3ef268249ae7d6fdf30ca00908305096dc13fad12bc83dcb11b59ac7d8e6f0 +size 13815 diff --git a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png index c6a2e78097623a48aeac600a6fecaec4016d998c..e97ea46ab1ea0c3eea0d843cd37256766788b44d 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:110b53901be1fce269b6ac80d9d2af026709eae5001bdce4c44404c674c87d76 -size 1753280 +oid sha256:f6c8e36e6faddaa8bba4b95dea7924b633c7de600d27b8d792c2a9b111c8d2a2 +size 1554617 diff --git a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png index 10204606bee8c93c433331e4cf23465fc1906cc6..967b31b804af3edcfe4712d0e10e12e277ef0e30 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e443536fbae6da7bd63fd08ef76abfa640cfcd36852ddee33b1d6c10a56effe9 -size 719424 +oid sha256:1d93004b64f20133eb4f5c854c448d53ac3e35311f05aa4abd460163fd2010f5 +size 682852 diff --git a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png index 3f919d59bf10f3cf3a7645bafe03408c014d7005..28900058f174aca07ed124391f69f5a722190a83 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5333cfea3053404e0c30d1ed9b2d38a78ffea347229dbfa6696aa293a5cea98 -size 721934 +oid sha256:a6ae50b430e74bb20bf75b07100768ff8e840c37cf40f464d992926ee531b6b9 +size 748226 diff --git a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png index 59bb56119a53822fd93d68059ac424d8e5e84a17..7abd1725d5101e994d01b919312cce4c34623f4d 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b365b95e48ad1591026b00ea052deb90297f3f7c21979ba04f524e6946f62514 -size 690819 +oid sha256:89d8d46305c47f1014c7e0d1b080e23cfcba5af687d435352162a2c1302dcf9b +size 764244 diff --git 
a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png index 599185e0092f636df5c7bd1126aa48b133c9d36a..3b9ea5e207e254db403c1f6c18baffbfe54f7fc3 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be968f654c988d7cfb4c1e2a5b1d550f95a9857448a4e16dbfe213c821b96341 -size 1032217 +oid sha256:d73c65cb98cad695d6d1e7f4413f0fefced9e3db85ef8708ad91d34fae614410 +size 1229235 diff --git a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png index ddf581b3938e74f34dfa9d4edd64549cf6ffb846..8809e4d6a9e5a63a31e35e7ec994d3ea8ce93ea2 100644 --- a/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png +++ b/images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84c8e6c6a614b9e91ce02765273d056dc9844908e02cd05b672971fb4b9fe811 -size 737644 +oid sha256:b778b82dac0775871f6012fa671e9ff0a6b31a2fd45052591e55cbb871718bed +size 1181098 diff --git a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png index aeec82b075c6b499284f16c65a9807141f4830b5..627f0dcc711fd0c9ace018c2cf3c1bef0e5fc035 100644 --- a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png +++ b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0502cd53fe083a3abf42ac332680664af632f142b2ebf112101dd30b1fe60282 -size 814194 +oid sha256:a7e46420260669e199f05f8c07b2f73e5553c21eaad4984e3e70167a4c1e3acc +size 311952 diff --git a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png index 29b04e5a11760fb4c7852ab205356c4d37df757f..b0e667a3a4a15f991609ecaff9f66f2364561827 100644 --- a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png +++ b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ad4b7e431816c81debcd6393d682633474f0f344270bb3eb6928fc7673971ab -size 2744828 +oid sha256:23ea52329e8a40d408c7ca70fb21ade4c875bf30c4ceb7e8d93e06aae9350805 +size 456288 diff --git a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png index a37216041ee3e8f4420472cc421fbe5a68ccc048..03489438cee2526c575818af1f6be51187325085 100644 --- a/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png +++ b/images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eab68ccffa793f6a8e840ffb827a2dba68552352535a3423c28771f394e780e1 -size 1229489 +oid sha256:e4faf81487210d2c9a2fe6fb25463abd33f1f6d91d1a659caeab97949614c85c +size 1704690 diff --git 
a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png index 52ec78eb0273079b53ce36ed5fe902c8b73fe1d1..26547f05390acf757943790f69f71793fc14f96d 100644 --- a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png +++ b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b55c24f8aea43ba589a94a8588990d048fe19a8b6632d38620923122bcec998f -size 746855 +oid sha256:e1a97dedfc91c68a5ea5049e9cba39553789ccce6acc5cbd62b4233946908df5 +size 633371 diff --git a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png index 129fc9557c92d9cdb02f4b2e2dd512d7ccc29568..7d4066537095bcdb0db3ad296ff6f757b0d33f95 100644 --- a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png +++ b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dbd68cef8689215ac4a00cb7a9812d277d28405eea596cccefd9129de2c88f9e -size 431289 +oid sha256:1c3cd5f0a2cfcacad9c5abf0ad75c496232a644b2880c2ff28221dc540fa344b +size 507900 diff --git a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png index ab6dcbd144a687d81c99e00460eaa80260929aa5..b3569c83941477ccb5be29aa0577bb4d220cbe9f 100644 --- a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png +++ b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a68e19b4e9c9daa21de1c9e45b8f1ab57f0767b0036eb44cc5646b3d8dc8cfaf -size 2041199 +oid sha256:3002f14292bdacd3737d2d327ef8e6234bd2af2d257d0967b163f4c116c42059 +size 1900424 diff --git a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png index 2778a422bff425d7b43c5fd6edff97b2a61b046c..e60c936d29d892e3d4dbe66d0f97011c4208b575 100644 --- a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png +++ b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f473ca995bbca2fea5bd7aa636dbfe3cd4554580278c499853103573e566015 -size 2137646 +oid sha256:269ecdb3ab1e2e5affc592d634805bc98d76f0c7db0f15c9999d01b746bc51dd +size 1526655 diff --git a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png index adb15786ff1085a2f259012130e77bd215994005..73deaae533fc75003332da7183ad1f8c63297993 100644 --- a/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png +++ b/images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:728bcfde744d20445e7b189ae811c845569f1a21001889f4c84ed87f37d9dc4f -size 674820 +oid sha256:2a8f99fb394a8964103930be98bf193a285ebf69abf3efea4ee65b86da10892a +size 1000999 diff --git 
a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png index 1a3391cee6031e61c2428b37c721e57a09a0cb8c..c42c063c95b9398910909f44445d5d5f5b983fd9 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:191e9249280ff316eef5eb48b0f892bb90388e3e153c1739fb001c344fc0060c -size 815442 +oid sha256:7ca5f5c3dbb9337d7a77b7386bcc4380672bf359f0137d497f0f63035aa9bd86 +size 1713847 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png index c027c12d15d63faeac9873a82fd028952caa6339..ba35f0636b1dfae73eb13aaeae5f53965eed6ec9 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c080da66f61797adfaab2ae927e12f8150385af2c6df0e79d7784c2e2d65262 -size 814748 +oid sha256:276ce91b015622bb57a5e533e785359e2821e6c4eda20b9a140f74dbf8a0a1a3 +size 661300 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png index 5839c7dddf1f52a06f272df2121fd095894a9e0f..1d069f50599051ce4ce311ea199517066e4ab1ce 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8221266befcfd070a86c62271c2cb1259a9eec251da397ca1d6141a8775942b8 -size 421344 +oid sha256:63b094a631edbce1e00c30a709aff3260d5f2d704735b9929147ee42753ed684 +size 1552770 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png index c2b323d57df194d056575a9f72b423e5ecb7b484..c15938886a78c900247d886f9cc0dc0b41574659 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07fce4f289530467f430938ad79a9ae6dc47f8a17e7b4585e80df61eec02c7d6 -size 444488 +oid sha256:1260bed957001f663595af52919f286aaf3b010b8b6541669fc351292dc8ae03 +size 492947 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png index 7cbdf7a116b9d89c004e3e0b2e96b41df0410611..80bef7a8788de90fa0b6987d36ab1aa8ec51e6bd 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9c562132c61cb8fcdc0f80f01afcfa620c6143cb6ee095a267db2ae2083cb4b -size 424443 +oid sha256:07d2ea6af19289945c99f44e394ab62ddfa2f8b1253ed56cdacf03445c7ca530 +size 1177108 diff --git 
a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png index b49df6f1f28adc9eaced760cd47cde88629c0515..ebb87f04e438b0c0c34b492b9f07b23f7f711e66 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5e362f92633c1a4800e38040eddfddfe81377d8a3dc60beb3f770d1d51a998a -size 419047 +oid sha256:f8a4b479e811a6edf1b146a9eab137609a268d698d30ef6c21c006b9abf1cdff +size 1254159 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png index bd96976bcec60f64c5e2543616c7497157ba679a..4f3bd75c3bbd66db4bbfe0822692bbcd932285e5 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6aa1599c18ef953e9de18a889db77b20ce3e5bed69833f8bb6d18accccb9d60c -size 557989 +oid sha256:2cf7dc01fcf831dcbf8cee69fcf9936800feef20bf614f4ce39a720aae67834b +size 1139266 diff --git a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png index 093fe9af51dad1e7b886463963d7314cd4b6ce99..4bb8180c7d17acf3823c26acc2243919cce2f60d 100644 --- a/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png +++ b/images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19eb4d55f71efc48f7ebbe21a6caee8539cf2d9cbf90a74ad18ba10e766a1e0b -size 598858 +oid sha256:b92471c64ff3b864460d0e74d78ef32a9a7ccf1996f13e44c8483d2bd89ff009 +size 778258 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png index 0c797b83d90755e0073f56a5cb7f19e7c7e9d012..98b6c714c31d6b85cfe42b56f7b78630d56aac93 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3c0cbf20eceaa016a6438419d575208c4c0ab75da10f888c35fdf27df19e78f -size 601365 +oid sha256:24b4325cad776244dbc9d1425755dad6b0078bca8fbae82e25966f568260c536 +size 912583 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png index 6c760d02442a4cff178d6d7985c58d74d5b46496..7d0467b4e6e9ca25689be0a6fc8cf1bf1ad514b3 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:547f228eeb58f9f2d849b3a712b22b00bf4cbf67e4502d6bf44fd8394d1a7e42 -size 886959 +oid sha256:1234d700fb001077362115de2f32341504b73860c299bd5697c6903688f3b9e1 +size 931821 diff --git 
a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png index 8694a55f6e56805f41fb22c7b464e81e50b55373..a214022ab971a5b6941b317022ff77a759bf6f0f 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cc8584ccbd64d94fe470b284327fa569a1e3ecd58103f7b7aff07d6a0b11e1c5 -size 1848560 +oid sha256:fd7c0340123fdb992ccaaf2f1d9d70efb34aab9b20d0b53a603f7bdf92a67c6c +size 831635 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png index 1e4807d3a9a2792c0fa86a61f8e2105c5551b06c..7718fe5321c1d18dbbef3ed4545667e7f280171a 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6442a0317c232482ee4d1b2c1bfa3decfd7eba8eccd539c915010199a4da567 -size 896715 +oid sha256:cc77f9afeb6a40c9da2212f1fb7e038964479acd64398bda4283c32856ba0bae +size 878680 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png index cd3674c5d57f9eada432c2a4c755fe519e1dd430..b82e161da0d938c22a2b136125f5807fd33c03f8 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0dc342734c579987c9778af8c1c8591a5abb63b693b9dfe714cb071cea03b6e2 -size 897565 +oid sha256:83741c3337b06efceadf38d39b9536ac6088bc7da6dde999f05469a8c0ea3a3f +size 1198544 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png index 3d1ed551784c29d9269ddd48bf3083f2ef37f424..75b5795b248f24bdb2daebc184b1c1c6f232dee2 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e7bb9da2e8d79935b9be80950c23647699fd3e1513ca92f91ad86f76a0f3ebb -size 971510 +oid sha256:312791ac1682f188e14f037be32d64c455f1548c7ac7ec44b0cbddfa0b756410 +size 1205982 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png index b0ddbb6f12b43347287ca365e5a6a51dfc0a0118..fd4db026a244cfc2fa63ff3dd47d6422fa8e6a0e 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a6fe74434b54f01b5780b23f34d94b8b33208480b9321b52a2848b7f4735731 -size 1070968 +oid sha256:686d7edbe3cd0f880b0fa9946d5dc73323acf5ce5a57e3ba3a1f61681d07bf46 +size 790905 diff --git 
a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png index f53568813d56ca942f905dfe079db2893f7297c8..d3da6fd10fe6687a71b6d2a4a90c1fdcf53f582c 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5805ac127151a78c28a61e38bc6b2e18d5d592d97d30072a0abb964dfc37fba3 -size 1628093 +oid sha256:a555d71b73a3a26d0b4d02d73db3f79115d0e083277c15fcb1593d9184a29518 +size 550444 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png index d5905e492d3c4d26b818f0ab1aa7093b100371ac..c65a813b5ac3587c5134d751914f8e4f5f85832a 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbd46a8c9ae46e30c4efb4d1b52dce2b7f99e4fcbd62db45ffa615a73d438f2c -size 901797 +oid sha256:4007e16bb945a55b53d409f939b838e0ee983af7bf054442e65b9744dc7a0bab +size 1202387 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png index 90994499aaca6376cf6239937d52ad081a36e8d7..6d269b62de0ebbfb63feca413d3c2492ef980224 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be4d7cbf9f861227baf1695eee635b61097f60b0bb11851543d355542148e5c7 -size 892572 +oid sha256:03c03f07a8b8ace6a1b74e042f9630990f2888b03a994f992045b8b2425416a7 +size 972516 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png index 092840be047074aac8b7e0b76b58a882e2b9306b..e248de155983bb67fed6c38827c9de2a82ff033b 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a6a7dc126e8012d109b59666e9d07f09e2575f2e24a44260ac927a8a43bd8d6 -size 897770 +oid sha256:014119daeb5f83b5516de9407944566b7c77904bb1b84e666aa6ecc65ed53639 +size 1198753 diff --git a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png index 6c8679c0165b9729653ef9ea44c78c737cd9bc3d..4d79b304986977ca647e03749f98a42ffd0f894c 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87d8bbc3e631cc5e9d88d2f80203903f2477d715b2247994be5952e8927665fc -size 896664 +oid sha256:eaec5a0071dfdf3b77c21d785f40d0906cb1dc61461e38b7b1a72626bcf47534 +size 997605 diff --git 
a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png index f40c6128cf653fc020d66ddb84db3cfacdb7b14c..0a462252a869fbf9d28eb884dc0ded87c6b5dc03 100644 --- a/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png +++ b/images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b99d516f19943e1b06f784f72d35e802ebeae019af01504e1b9da88c70afb1eb -size 785653 +oid sha256:7d51d4a5da0a485e37c34fab06f695b973825c9d852d7acc4c1d865c3f1a0326 +size 445693 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png index 3a9d0be887d46bc48cbfd0ca2e315900301b35a1..3e10edd3e785ee96eda7b83575b2373638505448 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:067d5e1141fa98843bd7b26c583dcdb6bdf7995ed069cf01bdfbd77e13f4b7fe -size 1776011 +oid sha256:9c590b0cda94c975e4cfb4c3464c234b1aa0cd90a4e3dac75abb880acbcd974f +size 1633966 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png index 74f1d517e06c2b4db5b4db1d7f9cbd2befb02336..5eed12f079f9225818f20bd56eaa9f46787ce18a 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9604ae497bdf58ea88b0838e77f26f31a564fa477b890850d36f08819e51e00d -size 2864331 +oid sha256:54bb1a08a8fe197e1a5b31c2627be80da77b1d7db2815bc1156895bddfb6efb7 +size 2775635 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png index d3544d266d12c941ee56ea3a2f4d5aa3370c9e33..9300ea00cac274435f4a376ee3a1ec2184c11446 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f06442f5e956da66263b96ed25958623f89f7d32e605ab23b6e363c613e7cd0b -size 2770604 +oid sha256:953e83c263b14c504373542a8d40c09a68df2f6f04688da4826fd57b240dadf7 +size 2662945 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png index 04b7ce4444c8d78119c8c1e5201881ef1f663cea..875832a66bb9a030fc4e1523914c1331485ccefe 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:23b00d1c7bcf5cbc0b06863015cbe311183d2f0638e1d8dae0cd433af7f3b575 -size 1663294 +oid sha256:ae486e71becc45976fa0734989fa84af13944b907181dde29b389b5b9eaff2bc +size 2109769 diff --git 
a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png index bf0b15217cb4c594ef7841cfcd644f506e787b72..75160183483917affacb0664e949ed5e118e624b 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cb10e578d3e9ba02f22c15fdc3185fe331a90bbfa29f0a8d9d8579502b703c6 -size 1115561 +oid sha256:e28c3e2734587e70a396721bf378c9161fafa87fc7337018264c2063c2d52b68 +size 1047895 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png index 43deb860e5e63559db6850a25b712a416be2b92c..e5ce38319bb5242093af5cd2a8c19add7824aefb 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18a8767f4f23da5de00675ca8477be231334020f6def5cf420e92d10cfd5de8d -size 2656613 +oid sha256:b77c4729377166422d6514018131b5e248491643d5a2bc23d021a6d2da8c4521 +size 2102143 diff --git a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png index 999b7992ec8219eafa5c75fa087cb7004ce56612..ba6ffa255da2b21ddd7008cc2eef3f90d494dc85 100644 --- a/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png +++ b/images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c22c1ebcd013c5e6606622ebe2df015aef317d883b3cffcf901ca86d96497dc -size 2082302 +oid sha256:6961c289b18c482eb9e238f1d31c55f50d6a4a7890d97b8954d4b650d9ee14d7 +size 2421652 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png index 67344abaa469906767a9eab18bcb3aca50b45a7c..f7d590272e5bc4b7d02c65ee91fc322f85bf314c 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32fb330cc8d1169be77dd6a810f446f382cf88f32fef3ac16e0455b62d4f1d0e -size 1222927 +oid sha256:2009e156925c6972d33abebbbe34f3d1385b735056a3ce46ae04256cc3db7381 +size 1544377 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png index 408d3418bdb5d2e4b69b9af2dcce55c9e2e59900..2f69951d117e17e3997743cfe9a11fbce0b3bec9 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:552ac52314694635da613222dc34438adc436d2dc187f8b79da08613ce3b4aef -size 1024620 +oid sha256:40c7ca897ca72b582cb80a707b2e0081be17e118cd287daf6015ec77f146486a +size 1707411 diff --git 
a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png index 3ae44c9e1184cb86e1854fee3db84dce1a867fd7..57af6084fee9a6b7525cf0a60a1076b2e2c7075c 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7da8f20d17a4ad5d62dfc326c04b2b50ae5b05a208dd2c43de5e56f4b41f8b05 -size 1234761 +oid sha256:7aa032a464082608728bed93286083b4195f660d92694dfd832b0228fc9b34a8 +size 1849426 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png index 2aecbd83d135c05b149d5d669d35a45c418aaeb9..bcc2c45b6e168a3bb8fe49ec0baea1d76f046cd0 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c52c1dfdb6625c4b947c126c54c2569cf974f7f85fabdac76af840405b0c124f -size 1227201 +oid sha256:3c3680128027287feb2b5806661bd2d1f35e3070fd2cbabf454e67c4d3c399ed +size 1556222 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png index 398d3b82351bc76af3ed93bc3bcf12ca1042ae8a..205baf780a313e91952b5a6851619d972ced7601 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e23831d86bbe88735a819de1a767d156d8b212c128d3f6de1035f21e830f3dbd -size 1180568 +oid sha256:4aace3cb1959404031f532f71fc85cb7f1b1f4295df636a19af51d06dfef1193 +size 1159627 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png index dd8cfc2936e43db55780e8a39de2cf797dda92cb..65fef4c7db526b837a687d27443c4657a3527619 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0554d40cbd2057ca97ddab62564840ffab7de5ba67ca7450135cb74a3943fe31 -size 1184280 +oid sha256:1179141a299811596a1c03fdd37461f1ca8ff1c9388ecc220407ab02a01bf653 +size 1619905 diff --git a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png index e4929528181ea469f15cb9c5c24de2ad98b16a3a..944377cf3d1a76e4d89beed229f533a235043137 100644 --- a/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png +++ b/images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cb638b7e112da8c8ca7440e3ed09e654907e0568ec336ba0f23d034bb627612 -size 1181902 +oid sha256:a9db4f3864edc2637b17539065d8ed561847348eed0d58374f602e63bd0ead3a +size 1793045 diff --git 
a/images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png b/images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png index d0e1374f2f01d4c1ee7c14376e2ab7e27f4a5599..046e985ceee193dcc10d74a549fa1619bf6fd97f 100644 --- a/images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png +++ b/images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b2378c609853948c8b9766c4307578d366f8baef7fb397cdcc29f90955dd9ef -size 743045 +oid sha256:bfa038ab25032713981f1be6451f4e97d2f5323ffbce91f5d94bff930d4cf362 +size 1324210 diff --git a/images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png b/images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png index afd04fdee3dca5810930061ce95126af752e904f..cc8f52d105194dbc9fc87410081468d5ba54ccf1 100644 --- a/images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png +++ b/images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43aa9b7557a1b805c3c135b7a8eef5ccd767d27cb241e5727b91084062b213d8 -size 1288883 +oid sha256:4127bc300de4b25a8db9980c41cca70cb4ea6f32245075119bd1ca91ffa846cc +size 1195216 diff --git a/images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png b/images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png index 952935b35f1a295f488e9e2a22f9a5072770c1e3..01d3120d7ec8b500cfadd79ab90476d989ccb712 100644 --- a/images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png +++ b/images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ac2ae4aed5407d52e7fa7cf430892875fa4e39d58dd858d5c5f5dddd0d2e018 -size 1031173 +oid sha256:d4f134084ceda8c709a88c426ebecc10dd960900eaeb973fb6222ab8d6659fff +size 955007 diff --git a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png index 4a7f5ef752d2a20cb9a887606fe4702d795b9bec..1025fe379fad59754bf4e365a273c404488b2d28 100644 --- a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png +++ b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2debc50cfd96eb166b238466b735bd16492bba5739f14f6361583c7a88b9ffc4 -size 739146 +oid sha256:4e423e1128c8ff7b0330bc50aaf687afc60f5680552c430eb7dd8c2d13f3eeb1 +size 610314 diff --git a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png index b6099ad7da0077fd55a6384730cc9bf42b5a2e65..ba92a24dfd1698f96c6e5c99ab720c96d402d516 100644 --- a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png +++ b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e4c69cd622548d4ed5d09ce7e3a9c7a81b399e494c25945f27199c9227f98f1 -size 879074 +oid sha256:c43535aa357e1b7b2b3b76bb5125729bf5213c3ee8c50607934ccdd989560db4 +size 1252892 diff --git 
a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png index 0724d81ccee54710a963999e0b1f59cebc2dec7a..7362da47b160cb8bd7d75ffade9ee8a4c39d7db8 100644 --- a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png +++ b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43b2a0f87b418e5ffe9f898c46dc6f00501e6e6c94f15db2fcb3a79457e26cc9 -size 814168 +oid sha256:f5e8480f69af9f2e60b941e3308e13b6d94d656ba66498826e205facaa3efa88 +size 1288285 diff --git a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png index 408a9d87e62fd4a007174c81a17fafb5c0fc855b..a3e7cb6d32a07efd300cd4e8babded8002a0d2cc 100644 --- a/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png +++ b/images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d198eeddfe60061ba314dfc6ed401fe5f951069b891f65236bba6ebed6ff1f6b -size 757311 +oid sha256:9b5629f2fba0bcefcb644bea454ddc60242405826ed252f48e896022e5cf02fb +size 710374 diff --git a/images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png b/images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png index e87c65ba7fbedfd5eaedef780b469e20e5e2cd02..7122bc21b9b06d11a768bd163e38af4705208a8f 100644 --- a/images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png +++ b/images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad2603507270e6795bd3daec0ac2ce366546bf20cd933b96ef466f5cc2443e26 -size 2669644 +oid sha256:de56ae3cd525c5e5befd622338cc9ba351d61668780afb962bf64b403bf5a3e9 +size 1475324 diff --git a/images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png b/images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png index 84c42fc22a907ee61ee8d272c782b91f1cb399cd..c2e0721e26479be472449435f0d57a1c7ca90865 100644 --- a/images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png +++ b/images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9f17f8b544e3e2c26a38ce2e7c0328fd8dad208aaa01097879742ff007a5ff1 -size 796667 +oid sha256:d783849ad1fff69cb4447f64cb23a4ed2ba19b7941ca6faa2cbfdf7c9d1776a8 +size 748986 diff --git a/images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png b/images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png index bfefb10bfc5336de1aaf1c53faec0c27292b86c9..5b14983d14d854e25dd88a0906c726d62c93db82 100644 --- a/images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png +++ b/images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a09e3e1e0a48331f2e4b7d553da81cb42d2d0008e4ad44825190bdf3156be39a -size 726335 +oid sha256:48a9ea74eeaa71c252f17dd8b1d406a1d41b04de94d1755917645ea3518ac879 +size 735786 diff --git 
a/images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png b/images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png index cbec7ff035cb703ed4548bff43869c5637b1d152..bcfb57a08f9810fea6d70f2b9e5a6cded3d88064 100644 --- a/images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png +++ b/images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:640c9f4ea9ff05fdc4424d010d7389f1a54b1d70edb0536d20134c9526327c19 -size 2771582 +oid sha256:440a42b43a4650e9f393ebd65e3c51c088f3919d2bf44b7bd2f1d5feeff138a1 +size 1440979 diff --git a/images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png b/images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png index 33701285e87a75b42c511abe6edc041a366a3e08..5587cee95b33003e47c9b8be7c3de85f7ededa72 100644 --- a/images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png +++ b/images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe2b79b6f905d19e76dadea7ab8e8e78693a0e7b5bd68ed2e0802de35735e124 -size 2192225 +oid sha256:df6daebd46b8fb9c7b2788cfa4a0ddcf8f3f9789093fbb9fc6834bf453605d73 +size 1476832 diff --git a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png index 37d0b1dd107dae76cb67da7d4c251971a0ed0b22..5fac98f04ac278d6543c1c3f7800dd8a4d1d20f2 100644 --- a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png +++ b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1fe9c9afc8e9789e44f27051124cac40d1a00827aa3038deb8b5e4ea6a9f945 -size 1101687 +oid sha256:ae3c78c19e52b4b541faa16b2984e30363c2535da45a86fd5b8233cd0ec49fff +size 1055852 diff --git a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png index 9c38602d86dde00cfd3bfa4c00393cd17439edda..1d621a5802dc332448d8fbbec8533473ea85fc95 100644 --- a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png +++ b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fbc5de23bd3872df58320d7aaf5650b95be855d6a7077792621cfb4751ffe6e -size 1101698 +oid sha256:b42ffda7afde7a05289faca949ab599294d44685d78e4b35dac3a628431fc11b +size 988651 diff --git a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png index 07cb74315bd173d95c3821ee48b0839d155c4e3d..17ff69b4c8f0c061b66ae3b6b88edb6aab972524 100644 --- a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png +++ b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84a950cfc77459b0e30cfec14c2b10bb802556002fe55c85775bcec2e0e6714f -size 790309 +oid sha256:71734c06d25f15f9fb677167778083b566f34be4574a7cd172b2ee25ccaf9b81 +size 920785 diff --git 
a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png index 269c5980c72b95ea0efeb632bdcb76d78cc3cdd5..58a01f60462d067b193d700a6399113cef72d358 100644 --- a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png +++ b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9f7d0ba0c1a11c6af7279fd0fb59e04d24867868474924623c3c267f4b48213 -size 1101521 +oid sha256:6701f834cc93911aa82425ab01dd5533b41203c5cddb74f936a5c2cbaf7c14ac +size 1125535 diff --git a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png index 10e6a395622da3f0f44cd3a7e386e04bacdcf2e8..4196f913f88cd1244e653584fa230af52bbf9464 100644 --- a/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png +++ b/images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:408d3323637fc7228d03bee6f50e4d9f231c4ce9cc8d5fa7284c9717d46c67df -size 776063 +oid sha256:7d97edbbe528eb9ef20da127b7223fb7f57631c12c7c0d7d9805072bc46714ad +size 726631 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png index 30d57f9a3e65c90ab2abd4a7799054aaa59cdbde..9a2d414790fdfcd4be6fd34df33616154e61d33e 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84531c28c06575772ffe12b911a6dc619b381fcf7d9fb88c5913caad7b835b94 -size 1484582 +oid sha256:d717cb748f4426dc7bcea8860cba799065ed2bd143d11cb86a82b30730299b1d +size 1533408 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png index 70340933db6174eefc27e4dcc3e60a82f510ab5a..b3819f22d9f1c60173ba933a6d7a3cd5379a61b2 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2aa321b3e838d476b65e4d3dbaaec31ac606a4bb3672a583c0efbdd3615e686f -size 1480501 +oid sha256:657bfcdcbcff7bce34bcc947eaf85b9e33282496dc07faee399d0f56884305bc +size 1481090 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png index 3e9e78bb0d2a4db47e04f6c942fcb54f6863bce2..6ebcda89fff7bf497c85f072e38143094debba18 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5cb1079090a9ff9aa5be211d57cc1834cc3b04773f12a4fc8b6c1859e609444 -size 1437064 +oid sha256:3cf69343e6eb70895def56a973ca784ee367d322bec02ab7015c28f987e6c625 +size 1470721 diff --git 
a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png index 31cc3c88d821d482598443a1a473508b7eb0db4a..6d94d6dd322166afc847dd5e0e3cef970fde8c3d 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a964ff446f074e3df39d74f40e473418857b92abff688776420fd7a5c23b52a -size 737688 +oid sha256:45c9086c011ab896debd193fad34a75fcac26007fc12769da6028a724b50b76f +size 859927 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png index 5fbd4596dbfd60c134511d3df439b598dffe11d2..b055e209b7415ede30147b512e92e3358e912a6d 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4489364446f568549fea7d1733e7db9d3520c7fb96f2d095281ccdac67be68e6 -size 1498743 +oid sha256:633cec17e1362a6086f501b73678082a443cd525287a1481f6a89f1023c368e9 +size 1548828 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png index f7affa32c7e2a1a8145d0993e490c6352914c2a7..8c866315b823022b74b9fd727a4375ffbcb09db7 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:17b2f1635dbdb586bff01ac6e9dedaa0a4b92339321d2cb8b4337bb8adcc7c4b -size 1484684 +oid sha256:2c83166c100c76aebd8156868e73758fccbba277a256773b5a4ffa9031d50465 +size 1532080 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png index 09356d174137a7250ac88943037e1fc304f4a165..b1bd5fbc73d7eff6d28cced008d78c98b6a8d7ce 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddb4b979107d86f51fe60632fba95d169d98ce9a10095ab3ff3f69b11376a35e -size 1482339 +oid sha256:b79afaf8fd05a8ecb484f06fbefa3bfef24fc3b2ebbb61d7176da840f57034a3 +size 1433296 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png index babb66bed4d49b7d05b39c444f60b42b9715160f..42f94b0fc01cf7b4d2ca78bd1a6154ddc02eccee 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92fbfc41f832352775d3d7df42893a35ca42b79c4c72c7e86b0d2a0103449986 -size 1477651 +oid sha256:af071876b0127d4c12e0c2c275f6a4fc64960e984cb29c10e18a5b1533115b0e +size 1477255 diff --git 
a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png index 60ad7dece5c652408c941d7a4512d21196d21d8b..b56a5d13c03ded613354367e476a3530261002dc 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e250a92c7b90b7b2b13667321e48e8d4d1b3ad88633d9bc9efaffef6fb4d515b -size 1485813 +oid sha256:1d022a859294fba53508c4e4cfb727195d352195bf1a565ab1416ced9ffd399d +size 1485513 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png index 63673d861346e541abe808f75cba9ee680aa1609..799d7d2baa32568eed8edad113844711b6fe8e1a 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcc89d775e6232e1c195be1a1d727625ee8488d64b58a0fcd96106d646f1b5b5 -size 1582603 +oid sha256:4a3e762f92ee583000d31bf1fff18ac704ca61649f98b81cf7ab749f9f1b1be9 +size 1582922 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png index b910fa893835fec9580366e11e0f83f9fa56a592..6a88b03c92a3e84530a7a62e868b6189c8bd1b3d 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:243536c8da1ee0995e25715b1d9aad7dada72df9ee9d1dd04328245dda73d5f1 -size 1383200 +oid sha256:caf81f3f66d8bf15ae45c49e9b62c251a1461afaa0eed623033f5cd3d70a85b7 +size 1384751 diff --git a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png index b7880ca234cdf2a9390eb109a5337c3f1de5ab00..7cd3c1e558120da80b4eca968929ec41710f1bde 100644 --- a/images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png +++ b/images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:283dd8fadc1e3fabaa7912353592344caad8cb70b5ff85b71b1beac7e6916a93 -size 1426101 +oid sha256:6b0fd0b94245522ce41e38df433d432b924e64b8eb840184ea789bee0e28a9ea +size 1518034 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png index 17c71efc59d6c119907b9868d7f9beb76df39950..12876031490b5e60200535ba9e99e3d17cdb6259 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37d5f48697b282a82c43804ee5c6beee1250da2d0e494922e0dd0b3b78f1deb4 -size 458453 +oid sha256:0f370ad589a2cfef9e671f26893756762ab5d8847ac02e2dce78a1e984b03155 +size 505983 diff --git 
a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png index df4333de25a62f4be86bf850f5dfa14c0a5e0892..fa3a9d97680c6b41024d5a98bd78f7999ac73ff8 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a41a0a00f64b074fcea53c67c85cb6206f517d29ce1bc6475410cf307cbbe06 -size 1340984 +oid sha256:aa9e153a0207ef9ee07b0e77922b751d4e277515b44ed76f23cfbc0d5c3f0726 +size 1342879 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png index a2d74d38a31899c8a79f53c415dc587742f752b5..83556a342a1320c117eaf66e312dcdd3fc3a3a25 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02264840dccaee5bfa3a27ebd1a5d730c65db85c5f3ff152b3f03321fe5d2b14 -size 1340909 +oid sha256:6d128c786627502aa8d986634e603b7c83df2e0dee17a7744dca1f301bb9cfa1 +size 856606 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png index be3534cf7fcdc8c4febed0a9a431471b1e387cf5..04caa8f91348fb286433e24589b1a57243418fc6 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb67d5c9b71a845f5271ae14b71e6258c5fc71237cc1d4b29dbc8aa4709521fb -size 1612723 +oid sha256:ec4b223cc129bf6b6181c0896262c0d42f465a8f5391c3f7ad87e5bf0849ec91 +size 1326334 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png index cd4a41bcd452276a12acfa670d43e84fce301479..a7b73f8376c3e86cfa508acea8f5cfdbc36c3166 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72a5320d953499a210b3f5dcff407b8c99f1097085eaf7ee71b77b97638a1f1f -size 457283 +oid sha256:938a83283412a4261413f66a131c699d39a10d53ecf8e13f5bbd4dc99885e3cf +size 554079 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png index a2d74d38a31899c8a79f53c415dc587742f752b5..8868f22baeff3489f983ac7113bba95418799de4 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02264840dccaee5bfa3a27ebd1a5d730c65db85c5f3ff152b3f03321fe5d2b14 -size 1340909 +oid sha256:e2834d3b632e519d489df4d9d3d309b6eb040c584e611cee83aee3fbd1058ef1 +size 1280249 diff --git 
a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png index 7bc69897a29aaa77cfbe09201be4205e56d0a5e1..cc87575920d973ffb223d6947ddb8bbdc57a1ef8 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1d018a288c501909fb73d602e8b3622cd393830a63009bd5e2d6122d2baa4f8 -size 1611396 +oid sha256:cec4eea767f225efafc4d5ec43861a33d87f85556620e6853cfb8c5807433897 +size 852046 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png index 51eea91501107a3fde18b455ef651110e4adb895..1c372212c48643ee8440a4f078cad4505d10221a 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:951dd8af5510086941373111f3fe8e30aab35e65c0ed5edc303e36e83a950f02 -size 1580249 +oid sha256:b5f1d96fbb10e96d23573d96403bc7f60a536d5ab2349076f56a661346adbe8f +size 1186484 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png index 652924d740190e1e0d62397880f5e1cb2bece0d5..cf9974204dfce7d1ff21698611e170243bdf673e 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16fda8f20a2ad1d7348d56220722d24e657c7f8311f22f760913c2acc8a2f378 -size 1649127 +oid sha256:fd3545b275eac93225dc3b008e937f2fe11a5b6260864fe9bcf700c13f873ac7 +size 1380454 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png index 477b5ba17a20c0ed5225b018e4714812bf495e4a..d46796cc3c6fa111b3141e2fbb1ebacb0dc3c482 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1d29c44913896d5147ee9c8b5f4986271eda5a21d5355f655791b640c42b134 -size 1586015 +oid sha256:ce2281f67eb7daedc1d377a1b542e033bbacab8d091afa822a96e17fbd5b0e1a +size 1599532 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png index 18a9e3f7911412233950b40b3c8b0c7dc385472e..345be049e9aa18deaf8a67aab71bc9d1153e086e 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:563f9ffb419db20b948ceb7d1fd82ec173d69ce4e8498e8df69531d5366e013c -size 1302343 +oid sha256:9cc6609920761b2d1a00a3a12402bb240d3575be80052a94ab7eb3119d2586ef +size 537773 diff --git 
a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png index 23a25fe9aa7ba11a9f20b6dcb7216b53fdb6ccb0..7009fb8630adadf08e6fa410020e8379aea090c1 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35bec93de0f6157ef3457e778ef0ddb39146776c38f3c83dfebb54dfab745128 -size 1314434 +oid sha256:c2e1e2e0a8db453667fda950db9f218e4a82bc280430ec496506c3dd616dafd5 +size 1304520 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png index 497bb8b4ef4e52e380ec4aef3341ce916730faec..d859a888495e7c3e16d1f456d287d9a459d470ba 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42a89ada74b4da39ecc6ca83e5795848c654c1a220b22629d6ab5bf88c72b0d4 -size 1612962 +oid sha256:d42114399e2997823313a67603af92bb8df2c3f84434c473e4740e721ec1d234 +size 1121439 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png index 76e53fb953413b8dc32697b69f1f83f20791e0b4..491d6cf7b9625f2b32eb9eee22cdcc2f8e27e1ed 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10be641776588a7ee8bbac71fd01f0212f920259e94ef868be2fe2646decaed2 -size 1316052 +oid sha256:f4f48824efa6a893cfa4d08acc796e6d62942cc41057980cce9db29335c5cde3 +size 1669067 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png index a2d74d38a31899c8a79f53c415dc587742f752b5..a6a734aaf38f7fde54e290e7dd93bb6572c918e4 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02264840dccaee5bfa3a27ebd1a5d730c65db85c5f3ff152b3f03321fe5d2b14 -size 1340909 +oid sha256:897f216ec87d6f708d02e296ea23435166267b2ae4a2bcaa9b9d9b10bba8273c +size 905940 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png index c32d8380d2de756153e900dc4a47e5b755eaef66..34b8f75a0d3c6cafb175fe238761c106ef3de6e7 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21d1b4e4757dcf4a665f7ee74cbd4aca29cbe573bc4393c4936a84203d312ddc -size 1281813 +oid sha256:410a7438deec4502f6abe2c4f209a2ac23b900326b58c52281894c38a5fc5ee6 +size 1103293 diff --git 
a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png index ed6c1d09da96acf3530b57834a898a3ebcb135c2..3c57b8f985f6dbd4090431903cf92df4e0bc3d3c 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:133854f25c99c23d3dd3f3f81c538599ee6242de69a89db25faf47e69a492200 -size 1612331 +oid sha256:8dfafd542bc15b2cf96eff6f13b22f9db6ff8ca0c590f78a7616593117566830 +size 1582137 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png index 78b98f3cd0399a1f3e606f04dd72826a8bc01188..798f4415e13f9dbd8389ad2dbe03e11eb34e71cf 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce1a4697e73f5baaf3fb540373a90be0dcbaeb7b14fa548c9edc0074388af0e2 -size 1235833 +oid sha256:7b03fecab55022724da7ac215c72ae81753d3c49749af878e27c5632b3cc5ad5 +size 1299862 diff --git a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png index ea4776ee7d985e50ed899a223656aa81a0d1d731..aa4ed183ab365c48e03f2f3b2835b9c8620f0f31 100644 --- a/images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png +++ b/images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46fe3fa3c4dfa1e14901ddb254fc43321b85519fb9e8e4430a26b8ee4e76f4ea -size 457818 +oid sha256:4e7b96b59b2a5b71e58ee70cd5c02b25a967706a03fa76fa8f005e876414a8c3 +size 562549 diff --git a/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png b/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png index 6539c40889722cd5e0210d05c90e5bf509a18b78..430929e4302f5c36554e183ba21e1b0ba59e86df 100644 --- a/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png +++ b/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1509c457e0599a8cdb3a5445dc037bfffb0e5dfe86d3bb6029f9679daead347f -size 1175110 +oid sha256:789cc08e4e557f741ae055d0697253951b65e1e803d3ce48d1a4004ec5e09123 +size 424471 diff --git a/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png b/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png index 6a62e9b4fd3071f23e170dbd899abca699e48e92..6f26b2239f95350e1883197df3c80885cacb8f3f 100644 --- a/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png +++ b/images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6da36b6daf9ee4d957e7f7ae115472fbffdc34693963273a6f641b9837f57f7d -size 1078304 +oid sha256:20e6cadd1d9d72575b45b1388cfea3d60c69fa5a427855ae59453868e39c29c9 +size 547766 diff --git 
a/images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png b/images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png index dd15a0983acfe7ce93344b95f1e35772f10c2de0..90a112b1329fdffb909b3fe6868bfab2908d8b13 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de40c8203179441dd390f2cfba3185862ef99056268e9669f46cb201a5587417 -size 1309979 +oid sha256:38a552e47076fddf113d33206178734b3b6e0c2be3d76bc74b5083fde89a2a3a +size 869280 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png b/images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png index 91f25ddfa5fec56258d60aacdb6a6dab8f627c84..a74b9ba31027be1d68a5810c6c08ce46d49b7a95 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3262f17a8dde1b9f3fdfe01b8ec949357364532fec140ee0a114bd8d42cc2fab -size 674250 +oid sha256:3cbaed69b39ae9bacb897aa2e8c06078978c7192ea8613fe14716abbe8f7b8f9 +size 549997 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png b/images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png index 35f8a37558d6eda92151c447e98fb6442b2ac4d6..1aa8c13705ba3ffbfa97fb85ab59bb0a96cbed15 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:028e5e073cc2876fbba07cd6f12a1634b456ff77869b17c958e597d1c4e5303c -size 927411 +oid sha256:dcaac47388fa8d0b8ae87ae76a50c2b65df7486fd4e3ea70b3feb8614781f815 +size 646413 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png b/images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png index 331bca98a1f743b5801f3be03a78aa098bbf2fec..b2c206c07ef306fb4eef14cf2198019b4bcde87d 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a70073369a8ecbadab61a610fd23004b75bf8a64012d2cd603f44ea0bd501192 -size 261608 +oid sha256:5c8361892862ba473e6cfa1b3b3f6c2b93d5a6ac869c8448af5e444773cc715c +size 336940 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png b/images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png index e09641f986126c051f59c6ecb22b0a442de73ac7..d28cf52ce187b7cf024ce5451dee72c4399b8e49 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0076f433d900f45bdf178d0d2daf739b89c2625bbebf4e7522f4b6151ada306f -size 1172369 +oid sha256:436275ef81462c3729ebf438eb032221d506ca59dcef18c8c51da49e54d86bb5 +size 806220 diff --git 
a/images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png b/images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png index 28d3cfe3c550219a167172368b0103853887819d..2d8c04796bf931acb310e063a77ce74781c3adcb 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a4c22c9b11f132d3193b1feb498ec4963ff20eb18b0f797547bf7193ad9b6b9 -size 907474 +oid sha256:a91b81d212834356497076b21980621fdae40d5526a09e5fb3ae03ed0cb850a3 +size 1149909 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png b/images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png index 5b5465d0145585d8912eb9c393c32141203fde81..75cea074b30809f4881268506b55dd234eb1bf50 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06227f0cce970daf26205d5a8074972410047cbe03fece69589ff5071998dbcc -size 295978 +oid sha256:73a3f5e89dfc358a5b96bce700ee76faeb81af602f4b62f7f29b62ae65c80167 +size 452238 diff --git a/images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png b/images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png index 74cb0de57a833f3256ae3175d8c4770668a3bf03..073d9f5bb92bf2502e9b242dcca6404c25e601d0 100644 --- a/images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png +++ b/images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f172f67eef61b8ba339ccdd252194bbad4049bf005a798394af8e830fd8c546 -size 760114 +oid sha256:3f875c8c9a0fe11a21cfc1a2b1407f3a4e9e9a9b11e44514607b55640607d74c +size 491918 diff --git a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png index 3a1e689d2802942dd226bc248f9e70f9cf4e8515..3d3bd2ba9575aa712289f5202ff9377d0f30154c 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f781c9bd63e2be96dcf76402abf3412d0ecf12e2d49183cea71a4d632e5caea -size 1566588 +oid sha256:bff3aacc246e772119e8498c4f2d7e94a9f4b2067468bc79e91b51af466a7423 +size 1509570 diff --git a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png index 1ec6587a2f894a44f468577b8fd55ee509be42b9..e43200f5c3fb3da37e80ad4f76c90eb97ee78294 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f8f6997e344fa9e55fd06ed5374e3bf86bb1a9f7f38c78d797b6f60e8199cb33 -size 1570868 +oid sha256:ddbd0fb53e8573ef1f379792e7fbe849043f3d38626b601fe89eb745987b82ce +size 1330947 diff --git 
a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png index 66e6855ccd2f5854659e6e29831586767827cc44..05515802c6244e1b312e23249c1621177d79811e 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf70e87dbeec41f5e560e08f025de7698cda1e1dfe8096beec19e29bbd9ecaea -size 1860339 +oid sha256:cf4de4a4c3db07fc0e9cdecadc42cd4391b110eafd58f7b45b19380713b11894 +size 1943597 diff --git a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png index 5418c10632731e697e3f037599334eeefc2c0aef..0f35a1f1d8554e11aa501544c729a175f7abf570 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1e13572793b38a7093bbce1a9ca5188748c553eef8d50412e48a6f6722fdb7a -size 2703922 +oid sha256:429fc136f2282a644f1d68ae817132069b773f2efecffc027bd26b0ea91ee845 +size 2100258 diff --git a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png index b2763eeb2f3151b1508804b5cd29569aa99e8d28..dd7128f5f13d121e8de6201fced66d1048ab33f0 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cab393e3ced140ce78babc8bbf0da842f5bf5b6811b226e6d5a563c0b44fb78 -size 1569094 +oid sha256:9403100b8935372c6c8825a50877b8e86dd6a88399564701dd999302604a14b5 +size 1685578 diff --git a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png index 388b7c815b43298b5d92368cbc603464943999d9..70ce99f4f09fe6f58b4020843ac8b0d26707e3a0 100644 --- a/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png +++ b/images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:589f0a5473746a532402abbe492e128c86d3c1795050687c707f9a51efe1bec5 -size 1573187 +oid sha256:ad81ea6ad6ae027dfe761c29224d0cad8ccffb491e97ec3cbcf52b0594805456 +size 1689776 diff --git a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png index 5d341796c9c5422df3e32c3fb1bb2ffd9444b9c9..9581f6ff3966b080d83862db0e74f9b67d5f07fb 100644 --- a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png +++ b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac11dcd53322f2fa2d05fc4794cfe10eededf3dca965f45d668a7293d65d4512 -size 3177735 +oid sha256:157ae06b7cd8d0f3b952402b9d864f377bdd126b57997a6d6758bd2141a9f4d1 +size 1258008 diff --git 
a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png index 39f455d2d1d4841a0e11f9c7dd3ba6519922f679..f8d4fb1a4d9919904620c93a9f8a2c5389dac384 100644 --- a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png +++ b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7e28a65a3284d589cad2b68fe6c28e9fba0cd2d1f481ce9be336a382b082f3c -size 2515429 +oid sha256:8016e3ba262c9fab9c2c8139b012ebf73c375eb710f0598b90fa7eb3b4d67c22 +size 1934396 diff --git a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png index 87aafa85981fc5b7ba9d0ddb757e405b0640713d..4bd14fe7f10bac46e55ca372aa34c389b3373d06 100644 --- a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png +++ b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:123d92b49e0e26e59f8a0fce4c791dfbe4d36d0971d773e1705dc2b06ef8b65b -size 1372816 +oid sha256:f9abb3659f288007ff281c2241bbbc15f6a9b6184657a43e6e173709ffc03fcf +size 1372960 diff --git a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png index df6c13f8fe38f313e7f764bc3288843dd92bb880..705872f2eaef95d28c95849330d30db868c0e2aa 100644 --- a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png +++ b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae898e4524b3e1b16723ddf5be9d2e2b0b96d64251ef1e513e0c02f2e5130335 -size 1367703 +oid sha256:e360ff2cd595c299e07de83467726aff8d1119266a4f6e087d7116c6bc72c095 +size 2491866 diff --git a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png index e51cbe29641f70e22e9c7e8dded3b7b966963056..09457ff590f0b05350f424e1e8546eb2d57ee5da 100644 --- a/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png +++ b/images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb455226a5b6c2a4c90f44ec8d15e7b92347cf1fa10459c436ff349a14da07f2 -size 3293477 +oid sha256:116a4da5df6d0105c125a813ccf618ad7d3349138f092306b9380299d1f9c74c +size 2382258 diff --git a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png index 1cb3aedc445a4ffc8ba73bb951d21197a30dd10f..e2f1a291092d929cce40bfd925ee24a381c67748 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ff97ec0a06a473148c6b94d420fdf0f0f5f825943133140010336faa50ff60d -size 497317 +oid sha256:4d715cf3ba7f3467be5d2bdfe2551eabf481be217104664cecee9f05d045cef9 +size 529543 diff --git 
a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png index ac3f76281b1274a9541ffa8571fd0c6b1c659b86..04a99bb694f14feeae991c7c41a447a69f2d9bd8 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:940fbb4fa305ace70b57f83e6f73f5b42f663edd352d03a310e49fbf55a81e0a -size 1420521 +oid sha256:79112f1b0f96fc08a88cb5a555f563d9ab77f3aa0fb23d9ce10753d2d2bb5851 +size 1592313 diff --git a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png index 6a380564bac592dd2bcb80d77f4b9f3f5196b437..d9c6df56f0aefdc76bd6774b923a8404655c777a 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:009056996f1b33f787d9b2ce31b2faf5f44e77fa56e87269cbb2bafcbe62f0a9 -size 1370107 +oid sha256:bb81d972233184fd90b687e07ef9724ae125495667c27e4c683e706239d6ffa8 +size 1484869 diff --git a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png index dff65f32a07d2bb8233eb2d933f41d5e3c656580..3f6b159d918aba5633b20c00e82654db534e1c52 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc03f31d26823e3ef038936deccd4c7f391c971c6575f8a5e42b0a76928ea7a2 -size 1371183 +oid sha256:d0fdac7ff674331849224c9df88b6c369b54ccc706540746a6a4e07ea67c6944 +size 907907 diff --git a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png index 00e12292645bdc9a3d128a445109b949399d81f8..687ea10e2fb76e567d33c3766b27fd89887ee63a 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42bf18c6b1fa5fc14ae06b45893e6783935246d4fe429010206f9cc61dfdb7a2 -size 1371107 +oid sha256:b4923cbbe87db5294817a56bca9cbaed296f1c8db9bfe2dfc5b84bc0a46d34f6 +size 1108576 diff --git a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png index 4e46dfc33d7b722ec48a0703a656e2e446997d6f..ef0e4b53af35e743c2ff6e1f297e890aa507a0d9 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82a6bcd8834b89e59c64b0cf05c95904344f1132858f45a0465f735796faceb1 -size 1417475 +oid sha256:39edbc307d594bbb241c46ea0c80f5853ea876cdd1282263d2431ba1efddcf0d +size 1606467 diff --git 
a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png index bd40c817782aa6d9c3e52c17e38216931a9caffd..c99125af0d7546ef3c40c6f747cea27f1b238389 100644 --- a/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png +++ b/images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aac54c99360f2cd7d88963319406b8bfc311d3a3b18b569f285bb8c5b355ccfe -size 511858 +oid sha256:2fb21b13c7bd2ce7205dc94cb96cdeca062d0b60a04c5662bf1fc2cab17f7104 +size 449711 diff --git a/images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png b/images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png index 92a189ca60ba987e2ec002e8d7b1c1d3bb6de19d..95095b9d7b97d34fd8fcc0f16ab85c2f440ef029 100644 --- a/images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png +++ b/images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d374df6af7bd1c19fe5d9484230e23c8b0d8c570cacf81423db97f2638d2a58e -size 790866 +oid sha256:1dc0e3cf5c733409d4dcf363f28ee986fdac91759fad68d0986509c213593f13 +size 1029513 diff --git a/images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png b/images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png index 957f294888420e888bba6c71ea7ad613f28d2469..a8c0d688a43328a52b81c78474b02075235827c6 100644 --- a/images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png +++ b/images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3aa8493867242da981d108e14094e87d779a244af06230269a3572fef28260cb -size 784523 +oid sha256:c8a0da594d4db9100ade655084bba1908f965cd13fa9653a6e1cfaae79871bb3 +size 630115 diff --git a/images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png b/images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png index 787ffb189ce486ea95ad03c99db2f93f7e9262a2..690ff42c3ff987d06995075b850d967910fd248e 100644 --- a/images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png +++ b/images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6aaf6d434e5a6b09e22e7b4896faf55b946dad0cb575fd11821d9e3a32796d9 -size 1045379 +oid sha256:91df0a62f745c42909a1babf979069ed3ff39eea6cf6e814bc918258860d923c +size 1285570 diff --git a/images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png b/images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png index 50e8ba5a93ce9c9ac75e6327f3d8c55f3b711512..3e02348e67edb6d781812544bfd5ed7ea4cfdacb 100644 --- a/images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png +++ b/images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ade0f15b59ebc5ea62e08dd91431d43506c6a080ae77d4da9d3494d52498d49b -size 2108826 +oid sha256:80f32847d400fccf2d154edad27cfce7004adbc69d3ee33be2d3615d4a1eb59b +size 2037697 diff --git 
a/images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png b/images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png index a23f8645f9b349032c8c872f08b32719966c3f65..d0a1934834936ab6ed75eb89eb9135d0c5dd63fe 100644 --- a/images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png +++ b/images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:357094443de519515463b59b66af91f6cd48d764d0da2a4789ca1b8b5e584f94 -size 1467060 +oid sha256:8756b4a8a671446eb1133839fdae8b889fafedce1bce46e5ca9eec6e0091a257 +size 1052327 diff --git a/images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png b/images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png index 0dcad3c2d766a8896414f8f3c3c54e3058d74d4f..acd09ebe7165e183704d8d9f3e96d43b4ab2bb4c 100644 --- a/images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png +++ b/images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:766b32e8d100437a0fa84683ba9177a236a69a143bfd37cb0eff0b08d7396ab6 -size 1313839 +oid sha256:e7a973fc55c138e03f457355b9122b07a2cbf38bad1f72db38e42138d94437cc +size 1624633 diff --git a/images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png b/images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png index 8ef891f31260240b2fb5684298fc3e3f8c3a0da5..be35fef35b19dd17258da16e3f55893b0365e978 100644 --- a/images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png +++ b/images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:19bed6d0262a23713b64f84a0bbc4c55fe7a12bdff54e2d84adc8da825cbf5e1 -size 2691973 +oid sha256:04d7b38f4961991b40b9b2d65d4530fd2dbcb14f99a7f803fddc1063dd236fd5 +size 2601092 diff --git a/images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png b/images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png index 82eab974e3e7c96a3e10634021acfab890d6935d..37ee5db84bbe2109be338ffa137cad0a7bb6d9d0 100644 --- a/images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png +++ b/images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eaeeea6a743602fea73bd4e307c13170edaeb55875e80f2f414cab8f9278bfe5 -size 1737776 +oid sha256:d8688f72e3518490955a36b5accd1adfd38dac3948664a0520b0687b0fba5e82 +size 2183022 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png index f2fedbad9136deb0190d4e8cafd6fac20cc01142..28faa285c7e05471ece4ec690ba5b9e8b974b609 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c664b25e94df09e05690acab4da40b50cbecc09fe1ea45acb9ee58dec523e3d -size 1441572 +oid sha256:a207c39b662c3ee9c872a618bc89f40fede3b8a963724168e2978120328eb8e6 +size 1457675 diff --git 
a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png index 33c526ac6eccf46c82e3c56c2bbec48c340349b9..f2f2312e1ad807b40ccd0171dd1d94a89ea1b0b0 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f512f36b0d11f0e0e6f88468e81ea62d3e9c31b314d5c3701be3bd617b6c9937 -size 1440757 +oid sha256:7c0784ee637a74fc48c1440f65e410f7f38225b17191ad4ce1b9aab7af8dabe2 +size 587676 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png index 6a4d1fe27c87f77f48f3dab3837999d87a8f0b98..e5f83edcb493533d82ae1425aaf7859d2f690a9a 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1f2727c253e1bef6b79abdbe8ccd2495d092e82342e9fc22c1ba084fcdfeae4 -size 1556452 +oid sha256:0d91cf015f8446af60c83796769c81236a1cbdb5c3c23318d02833d6facc6f20 +size 1355410 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png index 1682bb0814282f774c91183c96d3d9778e076547..a91116d9a96dd8b0797c4238a8df6646a682e699 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:829c194ee43a4d2c713d4f1b359714cc99fe9168647630bd80f9b2a0b8911b7a -size 1458803 +oid sha256:085fd6068366545a7dbfff1c2cac3c363bb72719ee2db11508f69ea90e319f0b +size 569645 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png index 71cf222525ad0077f4e646db1e04e4c436276f9b..3c1e843f37c145508ae58f37648c0b563b98e073 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7c87aef9d5eab52e6837309cc3968e58d3521b5889e302872269500e51852c9 -size 1437058 +oid sha256:a67ec3b390c8db61e0e204871ccf58bd8ad43a15f6159764c26d4d5f181aa996 +size 1348243 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png index a3240277b967b4514be3e3fd35b9f0a18fc27788..5ddda0a2d9d6bcfe2ceb83e51f0646e6265aae95 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05f11082f7e569d400c4b1062c7a0a4654a24f3babd8bb029a95e9cfeff5ce5d -size 1423247 +oid sha256:057bbee978db6eb7c81ccfb482f954257011b9c5495daff9d42f485036d43b2a +size 487829 diff --git 
a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png index 120e0dd3594eaf6705d4cc6246dad58234b2f011..f4f838cccd7a20e106f2ba0cd3d86bd0ed4d149e 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:223ead6fa6c11d025cc933f22da235391caf2de3bd0218e2f5405b55422c2941 -size 1432830 +oid sha256:1b7016dd3ef74bb44eacf32cd0c4fb715b64f4f481313cffa156c38ecab4de59 +size 1050403 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png index e68480399645d6561c6aa2260bd4cb2ee004383f..600531fb497bfd7b7bb6bd47c0728804a2216ad0 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0247a63dfcda5e5b33e25aa2fcc4916b8b436217d21de26377ea6d871e36d90d -size 1356412 +oid sha256:2865f7e96fa846ffb0f25f642a3015739c845fb324c1098ac1c2d6a2af674e97 +size 1273246 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png index bf2bbf0b4d56b998589fdedbf50078ad324f8ff5..e50ca09cce8b4390ea2a1721a8a7c1d47a6a4302 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:814e758e3051b00ba7cdfb677d91e8462dd165ac8b25fda9dc051e259c1fdcdc -size 1464129 +oid sha256:2dc95397f761bcbd2ec083af095de3672c2e73b340474029d459c2eab1959758 +size 573938 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png index 14715a0e1217c07e29e84a24c8076d23d736a412..9ee88f1c923c0487ae4a98a45e85b630cc87eaef 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e8aae156ae27fc3605a7cd291e12a4f6f21ee6c128fbfcdbf02f89c2c5bb8c6 -size 1439413 +oid sha256:0506e084306e199c519e0fc397f8211e208523b6653c066a730b181f35962011 +size 1379141 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png index 62c7568787bf2f05bba25c5339f062b91180940f..d26c1eb733edba83dd67d9ba01b40f63822d0a12 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a745e0e25f0574b000ae210cf42d5d047e59387c21a4eb0ac4aaf888e2645fe -size 1460383 +oid sha256:8accf5f68eb71668607cdfd0e57697155fbe49f610a29def037033484c5a9783 +size 929611 diff --git 
a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png index 9ccdd52ca79a0605e30f0494b871b15ba10f902d..d9854f458681a4685b7d3a1be0c3a3d33054e2ab 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:424af11c3d6254b28d942031f9ac6f7538636ed4f78fc4aeeedff42a9af94f34 -size 1420613 +oid sha256:74725c5b5aa68f5280dac990d554872fd7c017ea9e39b2ce982ea9df57b6e311 +size 685086 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png index 91d856c6da05cb35d05c6240653818d57d271ea5..46895a1b0ef3be3a090ad09a866e96e7cf01e619 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e813844f1d3c2026c767109d00bb9c6fc624b7e6e12be11e8b2c5768d95db1a -size 1454842 +oid sha256:a6f1f427831667779fb9a504780a618c4bd9a2dbb1f867eab3c176d5b91bdd60 +size 991618 diff --git a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png index 55fb8a47fec8318eff0c95e149b76e8fefee7aeb..fcad313ac90770ba873df0a34d1fddc3aea35cb1 100644 --- a/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png +++ b/images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b5ece024db83b12e517211c11f7299ff1de6d36e92323465df8c5fa1d372bb1 -size 1846278 +oid sha256:24fde8a3cad9f9e31b7dc3d13f92338e96a5005c829d11cbce35ce162feb75e4 +size 1775391 diff --git a/images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png b/images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png index b1036f2ec2e32e22e03c96314ab5e8ce10361709..233448875d067e26529a852ac7eaffd9352f0716 100644 --- a/images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png +++ b/images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1cc217d9fdca713f550eabae81a0aedbf7f23ce79a6f726097aefdde0345cba -size 1219790 +oid sha256:abb0b72d852c67f48fa254d9e12fb3d6564f9718de80daa65faba7900896f39f +size 1939351 diff --git a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png index cf46f18b137bcc67227fcb8007891df8ca07b344..92f32b3ddf5f49cb93288c68638a302d768f8998 100644 --- a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png +++ b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d1dc98fc0b652aad7f14165fda720b942e1e52c352adf014857dd7219f6fc75 -size 875548 +oid sha256:a858b015fb308e296d321c2706aef1b17935f8eee0caa34a2d8449c1d37ced0b +size 889759 diff --git 
a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png index 627ce49e910f7d01bff7c00a4b6737a9f9461979..097404d4178afee9b171c06c8949a8704b130de7 100644 --- a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png +++ b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd491ede9a9ac1e12c96db7a87b57ed8c6875fab76d5bb0be90c8658e9bd4fa5 -size 654799 +oid sha256:31ec404b7146e5ed02cbb2c34efd7be7960b0e1c4367b81a646d8a9652343697 +size 746583 diff --git a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png index 627ce49e910f7d01bff7c00a4b6737a9f9461979..0b2af827b5283d627c8c97d59aaf35018b763d20 100644 --- a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png +++ b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd491ede9a9ac1e12c96db7a87b57ed8c6875fab76d5bb0be90c8658e9bd4fa5 -size 654799 +oid sha256:a29288946c3a47d12363f0381fa3df8b8c2f33b7c17a65bb9c1f4fb76127b075 +size 818863 diff --git a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png index cb1d87f88d981de0984264d63ae6cd8cfc911c04..4473ddbf54dede14fad1fa60f1da93e3731daa77 100644 --- a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png +++ b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:50311e9840730cdd2482c1182514c2970fb9fab2399225496c5ef58502a63d24 -size 764638 +oid sha256:3fe67f1e00c1391b699ea40e5058f6234bfed3a607258de03ef1502719a527a2 +size 594555 diff --git a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png index f61a79d6c1c8f7efe7eb263f76eaf47021cfe84c..cdec9e02856a19d6335a742d826727b9fe5a071d 100644 --- a/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png +++ b/images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:88e4dd86a1d76195654ce19b8f3c83baffcf896f2d2c1692f99aaaabc3b1c294 -size 708601 +oid sha256:ba2538726a4de681524f62a92614fe717a5765f6a38241632e27870923e2b210 +size 710118 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png index 7ab36605d48d485547badf6ac777172643780b79..b19a3197d291817b7be07fd1b6177e703bc81ce1 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:368fb0cf07021c0c47861dd6a23385004d890dca94efe0e41c56c82dd3418a09 -size 704382 +oid sha256:3b07585595c49143317736fc0b8970cbc92ee6d2d0123eefe6e8cbb62ca2f05d +size 911398 diff --git 
a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png index 708838641ef3094820857465f6f167dc5566df9a..cadf4d89fb45cf24e51226ccf50af6ac72e58859 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f24dcc31599d81f2647f4b0157bcc2d512ae3796c4b2c79392372544c4c43da -size 596935 +oid sha256:fe15e27870daba66d1dab52b6c39231c2cadb3bc3a319a5cc08afd97fb2a63af +size 540947 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png index 7ac995b612807fe5499a4ab912e2cc0ee628e348..aef5e385948c6b2ba7bba1feceb4bae697d9bbcc 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0c3a960a03d0686fec1fa415b889f7a2b3e9198b4e55a830c572b7be1258694b -size 853380 +oid sha256:c2d2e5ced7563839b3e58b5fc002d2b3d93aada174c813bcf5b3ff3d7f4b5f70 +size 849377 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png index c9c5875dfbcf77796ac630b39ec32808e72f2052..6e68d6bb11e8d078c2afe76c12e1a7de4b336524 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96cc34f6cb1392e9cf6dff4d7f8c80e04adc3cd2767f24b4d3c69b51b90d3063 -size 490144 +oid sha256:112115a0a349a087f90ea6095c30356928003f75df95a0735bf72b30224545c7 +size 817501 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png index aa588d4a749948a1b9058f9a64ea996d7de55942..346d637fd1a762883ea97e14d6619bd9f3e8a2b3 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a74f3316e7337fc5b4e4e0922945e1a0df9108a287dd29a108c4e731b22d7c4 -size 1335472 +oid sha256:e2d7b2afe743d717f29877bb45861f71091ec1898eb143cc03214a9e8110d3c7 +size 1250304 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png index 854a67accb8adffdafa02907243031f8442ed7e6..446a14bde29a7e3f00ee2febbe55482289df4c45 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:35ddce3a245d0abd79768e7d43dcd36ff7a818a9b79cd33ae3e2603d977acf11 -size 188090 +oid sha256:a34bcdf04d4f05cb12064aa4782d08e6eac2e66caed51840463b763282c725f8 +size 195802 diff --git 
a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png index ea9df6fa4947d50fafb8ee51fe8a26ebde8e6163..7875a64d5d67ccc39508f9b07013d27e00f54c45 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:146f6061484b6ca81efc38ec7f33c66462dc61bf6739f60fb9eebfeff16ccee0 -size 457419 +oid sha256:776014e76913c4bca6336148f212eea874bb8c27ac726a4376b9b8d47cb8c2ac +size 581930 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png index 467149f19b5423425190ba0fba203d350e6d2395..028cfadd3e1ba9d2b5daaed2da5afd21b664be05 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5afa66f4cfa87769bcf01f767908318179c8daeaa0d5329ef86f561662d1feec -size 727279 +oid sha256:a16e45e33922323946b514756c67eeda5a828d6b861fe6814e8870a432656350 +size 578246 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png index 3586291d8d90513c4da97f0f6d3c03ef00e12d3b..bdb41c738b095b270b0dabe983d95735933b2c79 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c73c9ae286d6a1a968dce4da903f19a8df80212555f424bd67f18d94dc4e723b -size 726477 +oid sha256:8c1dad129a4fc44dfc57505f9b29a983da6351b87f91fcb9137d69453a43ecc9 +size 792633 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png index ee037a9da164a4c8e8a63b5012bbaf744710c388..f6b06b25c596c23277bdc71e22f8c3db36713680 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d308c5b45bd72fb01f309d56706c26fb2819e06cfd4ae4269ecbc0b4280e243 -size 626714 +oid sha256:2115785fa07d6e85e74a5c4c324d194448f540df575a4f8ede9f8aa77d221631 +size 1058492 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png index 2c1cabca575731a74f5dfcde0bb9167e5ab4b080..ce018ce7bb08ab2f4dfc9b25647969e8ef421746 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a82116f1d8d6b21a96792401c618ba85a1ebd28cc74a1ad3204ed5a83747da0e -size 574886 +oid sha256:b04928112fec061713e95078fd77ce87b68c83223258314b4ce44eb2e0fb4d65 +size 619486 diff --git 
a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png index c0f0ffbea9df711e7f3e1f8c0e09f5250c37ae46..ddceb1c7fba3451374633c7e19bfd7f46e88928f 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:662d6b8a35d5ebfabf314da8868bfbf45ef00e3d740d6c8d99d601886ff97a68 -size 842472 +oid sha256:d4bc89192a1357322230953558bbdb8de6a607319ae4b132c0f2dcc8b692ae28 +size 1022163 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png index f06af635e455b3e102d05d6e6412efc129c2f065..6ce5c80fece76fc6c21fef9d8e135845a341a086 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7d2e56c4689c98833f56e5f0b29982d6face9d9907f227a5af903c36ee39200 -size 449938 +oid sha256:4cda65dd32ac522c24c2023270789c827738216d2947134b67e6556dc6feb452 +size 221367 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png index e23c1b5bb1742b9e56078f73bf9956c633b05ec3..e81a58dd65619845faf1be31c00aced9920b7f20 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36f9d07c5119a266b368143c3a4911bea0ca4fb3c6cdaa31f9bdf3195ca2e2d7 -size 660680 +oid sha256:9154859fb984c2e6e530d312a1322c657c3b6f52b1f1667a16139f01aaaf8dc6 +size 681734 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png index 6c04028c0148a586482be58cff08e3352b5499d4..d2a9a8a89f178271d32af5c4bd6d7ebb33d3a3a2 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ed05ceaf62f1126f6f2295c38ff1397bda84a75cc895438e76c2263766c1932 -size 501815 +oid sha256:382a88f730bda7f680019c2a2d0ef833cad7df6608d9b64f9a8a8355ddb1f701 +size 791588 diff --git a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png index e10f91c11e3668461131555e665f4d39c7d49fa9..0b2773c4f5adb1ca2efdf001a4347708618eb645 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf802f86e3fa566af6b81f816b8fe594eee4b2b49695983ebf41a827ede4ca4c -size 491133 +oid sha256:02146849c67eb42deeda43f0d4b5dde5261f3eb3da688560fce9cb9d9aac11b8 +size 328824 diff --git 
a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png index c7dd7c6c12d0ecc2c7fbc860b404eb95d9ee743c..a58148a3fbf19e9558a6ce1a96555530a5bd06ba 100644 --- a/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png +++ b/images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ebde2f3c554eb7c8cb71bd4427d0003ff6fe99daf9b2a4847f7ef0469d747289 -size 800571 +oid sha256:0c65d8bed360d3d61efdcbae0ef4e72520071983043caa4c452d6d7b7c386970 +size 1062085 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png index a4a0f6b236167050f173b45163a32c1f92afa5ae..f1fd0e5e3a5648fca2d81ed14f57541e322e0e2b 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:572515080b3de6e2b9548278ffe329218fa3c3d2545e09df3889b2720e9583e8 -size 1456382 +oid sha256:05f43ad58636ddda1dd9bb928df158900b95012e2ffd077dd1c39ccf7bfbf597 +size 798012 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png index 55927e040526b511ed41a3c586cd0781d80941e3..db620e0b5abe82bb442800145ac063fd7be3fbe5 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4afb0d091b7b3d5355282424c708ff30c8fc28507fd76f317e8ec96e39554ec7 -size 1108935 +oid sha256:9d9b80ef477f21cfdd6ee144b89056faa0a98d531f0c01d6305d6529ba2ea311 +size 1111098 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png index 1909e7e16fba8a59e66fbbb7f20c1e1fb3984642..9654851d75e67b38142473c4940222fb8bd6a1fc 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ade4f4d53c5f390304f47c5e52f758b10a51e39c8a31342871f504c373543d89 -size 1063905 +oid sha256:d1b6e1aec38e137692f987a5d1d673cdd09e4dbead0618a56adf99e3319d46c6 +size 1091088 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png index ed83b2432c4b1e7b9610effe6b8cdd44c68be698..cb7b3f34ad420c5b73a99c0f346f5e83e3725b0b 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:660cc311243bd0f936e8a4a3bd817fa9091152fa712c8ffe26a889e58b151cce -size 1648769 +oid sha256:91c2420298011bcee1b327b3c70a94db7dcda8d6a4c86e2f17158bf37eb430a0 +size 1295631 diff --git 
a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png index 01bba524fe727f6b7b35639f51acf963e406fd00..1e26347cf093941ee6790f87ccdf2a357f0803c4 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52307fc4f657e8dc11c27580b57230969ed33b49e43e4b075c83993e35093bf2 -size 675293 +oid sha256:3d2bfd4e02a73721aca254b54bcd262c39023a4371eead6f579c09addbf26c39 +size 611398 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png index aa17901574497b19c544734e183b082a6f7e7df4..3c87811ef9418c39264f960bc7f0eef4462f6c3f 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92352b1c011ae9ba105b4dd866fd281e105877ff1e0671934e1eeb4ee750d6f1 -size 1356647 +oid sha256:b4d700bd7f3fc697eecf395fa773a8be578f5a350e9701cf3a6899de80d02793 +size 414371 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png index 8b13b0e736576e1c4ab6a2e1d0c5566d8ae7a4a9..19ba371e6e53d531380bf445144ba9d77e95ed25 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2acd3d323f39332a09a7a7a5cc82c11def4a7bd262f348ba6aead983e564e0e -size 1053523 +oid sha256:fb422897c3f8b31347cb8948c65dc2e9557d15417833cf6a44b8ebb8ca5e0d66 +size 1053395 diff --git a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png index f8ef531f20017ba96e8d24b90b66210c38d58604..b9781ca1e8bc99f0b0fa9ca1bcac20f7b350f16e 100644 --- a/images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png +++ b/images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:812b9603d8569ee3f3a8e808c62eebef24c96c05a3e56909a223500bb16b5608 -size 928418 +oid sha256:8b25c3e68b7c6cfe93746d6726bab609bed57ff10e684ba11435cea58591327e +size 731692 diff --git a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png index d8338bf89c080a8d2931e80baa4ca919c736cc79..8f22804437c4b58390c7bd4ec2f027eeb8e669ce 100644 --- a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png +++ b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8cf2e9897b5d72cab2e7f212d179d293564afb6842570353bee0d2819aacae8 -size 155814 +oid sha256:310566f34aee1b9d3c9c11599aa78be6f626025d1dd699403d1947182c3d6a88 +size 152423 diff --git 
a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png index aa0e000e2cd0db348ba9cf69a74d73dec6c0583a..0d7cfcce17a6787121a2a180495b772e2f7bf26c 100644 --- a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png +++ b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3dcf92ce1eed36d53f3215a541e07c92996a8ddb202b7d89b5b9089f03d2dff2 -size 630469 +oid sha256:94b568dc3e1a5fd5d14c34ce6351597e68eeabc69735b0f50494ed3e5259df13 +size 604141 diff --git a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png index 71d53ffe01818c3e7eb8c0795b190b8fa7a9731e..420c4d941284c26d992b3aed0f9b6dc7d30993d1 100644 --- a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png +++ b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06f132b7cde402aa57511e72a5ba9a57eaba520b1e8caf432fe881f593036524 -size 924243 +oid sha256:09f36930d18734a3fe0f20a48f27f496d3412c172711baa15c32fd5860267acb +size 1125252 diff --git a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png index 1a6b9a1fa2eacb6cc7566a105df941ef761d64e7..fe88628bba98ff40ee4b2d693c6e7ab1f3b44d75 100644 --- a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png +++ b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1996558f4b5da2bb1cb8c6fb2391df2061697269e8f7850726b803588c5a30b8 -size 578678 +oid sha256:23ba5eb75ff5311e8ccf01add687a10477112abaebac7003bba5a474dede1cc7 +size 763930 diff --git a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png index 10ab1d8cee091bd9407ecbd455ec7b3d1585c0d4..2246021279591b4cb0b1c6a878dbf4cc24f131e9 100644 --- a/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png +++ b/images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8a90d21f277258a76d204b5f988e2bc6170995bacc94f32d06eaf73c428c54e -size 155815 +oid sha256:62407862a84276df5c99a7752b5e8318936175395a46bead0443703d433b7640 +size 155807 diff --git a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png index a0b829da66fdd2fa6427921bbe67c72cdd6e63dc..9cdcbe27b514f7e9104d561f686b1b893e699aff 100644 --- a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png +++ b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1eb7c7bb69bb15131e9df41d8cd5216828ab73a5f56d3700d2b5afac48681596 -size 1201869 +oid sha256:3d7a42fc6d6da3103dc2162e69cfe463e40871cf81c569cc0f8226c8406575a4 +size 868450 diff --git 
a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png index 23ae13f46c5df50f7de8a30414812e0ce8f9b4c4..82ab396322ce68e206c2a4f1506808bcaf983247 100644 --- a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png +++ b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e3dfa27a6d30f1e8d77a277463069e14b23f76600362320b7962215772877176 -size 1197736 +oid sha256:1aed89c7e6715bfd24cc820cecbe6679a2873d8810b47eb552151fc2f406399f +size 903388 diff --git a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png index 3bcdc7e8afaefbe40581feb59bd397d6bf97b62e..39647059bb151645e2255ca4afba92118d941fe1 100644 --- a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png +++ b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73ef0764b5fb186a7873a42bc6bf77b5531f7c44913de4e922fb8204e23cbdd8 -size 1211168 +oid sha256:2bfffe5fc68afb6bd346fcd6f4db694139ee81a254dfadf726db73e251e507cf +size 1262109 diff --git a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png index ea63a56e0dd178a2d32af66a3be6af0f056218cd..81d9e163b8ea507ff9cfd181458ea23cd1e2ccc0 100644 --- a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png +++ b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f653dbc3c58770cdde2d1eb597c7dbee6228553adbb79f358946a596ffeba50 -size 1573499 +oid sha256:ed1a397c5de21159698b2c413eb4b0d6f8408693d8546d921428bb30e7041d44 +size 1160986 diff --git a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png index f3255587a8a9c720ee53a03ab589eceabd5ae0e5..c7412491d8388a69e90eba8fa6732449e0345c99 100644 --- a/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png +++ b/images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae65abef41920d8d3885c2e05e8dba9d6cb44e65a5fd9ef1faec6060aec59c20 -size 794214 +oid sha256:067fdde08b1597d8e265ba399e0a27f2e2b14dd6bb61cea73c02dd7a21b4abd1 +size 1614313 diff --git a/images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png index d492a1a50450dc4eeae74272ade9ec090b1c2542..2df574264f6b706f9eac1cabdd814c7e1ffd64b4 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d583cddfeee96dc9d96941db99ae16ff1210365b1242a0095a195c9993354378 -size 2537414 +oid sha256:28c375498ea2c1b260be10f577a60101a6583264bbea3d243eeaae9d69ff8df2 +size 1524397 diff --git 
a/images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png index 92bf38f3862a06361ff82f84869e02400fc4bed8..7d2e35a4729d0f300d2633c506787f7ec086cd84 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4c8514e7649dcd7d09203049c6c0a207a099952cfd5c8fb9e7421377100f9dec -size 2233655 +oid sha256:29ab16fc9a8175ecaa9ba2441cb710f07f90ffd3e6bc827f788451b5604d4381 +size 2498117 diff --git a/images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png index c490e67215fd5f4a5ece4652ed0c6fa5b2c2ca19..55a2f66033a2fb4af2f53a07114ed2669a573b31 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8351a55820952dea013a9f65e445c55a3d3dbdb398383d15c97152dd21e990f -size 546790 +oid sha256:6c76d08390057a892f9e19004b7b032470831eab540bb207954baae6e7838875 +size 553146 diff --git a/images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png index f66542dc3312767074e95a77311ef276f11f6ef8..2abc23ad097b69f457cc21da812ea28f3d0da27c 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:916a25fd9ea11d203321eee4d225f2ec8e306ba9d64535e241aeaf72ac4a16b7 -size 1334733 +oid sha256:02f9b289442f49c6bed16898ac5ce5a3ccc6259ae91f273a4cc04f0c6702ea7a +size 2205947 diff --git a/images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png index 5752e1496e0f47a1c54ccc76a208dcf6b054acca..19fe6127d2b5a046b7f53b65cf2fc463ee69b594 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:236d391ddac9a389ba6df966d42a5778a185df49a83238422241a21ba289c75e -size 3324791 +oid sha256:d21b57363729400c76b0df05f24f93f452f853073031132abafb5fe525b83f48 +size 1643229 diff --git a/images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png b/images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png index 5d341796c9c5422df3e32c3fb1bb2ffd9444b9c9..27fcafece32f5f5a65365c97149d6fbf078ecc99 100644 --- a/images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png +++ b/images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac11dcd53322f2fa2d05fc4794cfe10eededf3dca965f45d668a7293d65d4512 -size 3177735 +oid sha256:618ffa197f95925d0ffecb72e961e527afeac9642dd6462c2d26f838220ff9fc +size 2330719 diff --git 
a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png index 9cc52265ca3d15c5a1fb16644a5c4bf42c43e3fd..ec47fe3c8425550555fbb0abc9b34cdaeddbb3e6 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6f6dc30d65bf9156a54fd87c4e4ab6f1851615f16f757c79f0976c68f7eea3e -size 1623641 +oid sha256:eab82a3f649320f33a56fec202de20df1b2632b76768de64452c46be404e7e8f +size 593700 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png index ca137eb253f88742904402cd2d2837cc74840e59..c424a03eb192b6a45dbf472a89fa7be71c01e149 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a99d0da8ccc97a76d71793f9d8f25eb0e46756f4f852d95b887cd00193ff47e2 -size 747641 +oid sha256:f24f01b647b6d5f95a8a526da3ea54cd21f6f6fd12fdf9234c88b11dc7d8f22b +size 815571 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png index 7af0de4dca073753e3920baeb7ff59edc53b24c0..7c5434cccb771a6c0dd3afe7a821a99830276bca 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9b1a89a287c01e45b45b76d0254bf59a6843d5f5b78d33d99743125192e7071 -size 615576 +oid sha256:7295bc6c9f92b794a2bead676f1e4545e448fae1858e6ac0e142d62e7ee57a22 +size 842038 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png index c67f7335eac241e5bcc43f22556c88a84cb9a942..e3026976778b9a56b86ba31181572f686ea7e02b 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:65118e2f18bbb28e64d6c63fb3ba04c0f909767643c3761aa4110348bc0f686f -size 912542 +oid sha256:ffbe16dd5831ccd127265a877bd7fda6b8e70a87791f82ed303f36a461c39466 +size 541452 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png index 795fbf70d1bf6983619dd19af195e2d23a92658c..01dcc6b99068be413beb31d429d86cf15cc090f0 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6f293c5006ac4e1316ddc7aaf36f974ae45cd391e8a7071bb308e9ce4a10e9d6 -size 1755707 +oid sha256:46fe08a64ebbd958cce3a8369440f052d6f6b759569a3b7e4948fed7b184c25f +size 1268545 diff --git 
a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png index d21bf31ed521886b8c75aa0b212eeec50fa17070..c10f8a856aaeef59d71c836357f6dc63a0b5280a 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b4f2648bd524e98b97c86af0842e92c9946e0725a0dc19356e2c68aaf2402e -size 919631 +oid sha256:1569241de78cdfe500180b3c3a8000f190f5ee6c3fc897b4cd25aefb17ed3820 +size 1485330 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png index 95114bb32231df6973c28222adbc3e2518ce8621..004d5791893db5670585e92fe0f239caeccb9945 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83b5ca6fc4acff857867ebf3524acc17766526f6aff127b66b982ac810dcd309 -size 975721 +oid sha256:a0fd57d71885445743e09757ddb7eaa5bac0ffab8712ce4419096d4c999efce3 +size 805970 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png index 76cb09271c819795dc3af1f8eeab24a805adac3f..55df179a79c7b14064f8af110eaa175c00a9f930 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef27d15d4dd288dda246fa9757c95601960d284b7697fb8824d172e06157d221 -size 750712 +oid sha256:6cd57d7746ea96089b5cd003ddc4a25892c43c6a15ba5b05a1240882fe829c3e +size 548734 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png index 1509e113b98a3a6f3ebda29df516a5cc4ff7bdfa..2f3ae5466956e462fd58d0ab6fcdc624d6dd0588 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b429e5a15a13362ecfa9fe51ef853129a5df0ccf3c4376e3e41744304835cb73 -size 924245 +oid sha256:99012ec63236350eea3fad1b2b4ac0b35b3bca14e24013bef55e8cfaf023b466 +size 506759 diff --git a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png index e109b081a0596fa8ee8e98d46ffd9b294fa27526..656d03badd59a41bd3d7db0c216d46b7e82f6592 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bdde08d505c2cd63f5490d5dbcc1817db08ae0d9772bca4cadf34a7abc83102b -size 443714 +oid sha256:fcf8ca446d2217e4c77a7fa415112b34d2c58f415da7f3a196ad62f7ad0493f5 +size 561270 diff --git 
a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png index 147776e1831a74d1fcb21d4a2a2caaa96491d6e3..967f861036617c42c6b525dee3b57be12c0d2cb7 100644 --- a/images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png +++ b/images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5abeb5616885c0a46964a73577a49c5cc8ad9ff8255aac1da1515403de0a6be5 -size 1439271 +oid sha256:0f3530b3e85096cf8edff44044c3b32faef07f5697bfa67a07a0aac68b2de6a7 +size 709187 diff --git a/images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png b/images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png index ef57df21694f5250b158737f0cfbed2668d45869..ccb8e81efd46a98ae471046a2ee2e401d167e1f4 100644 --- a/images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png +++ b/images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3e8fb89dca4c131f660b255cadc0d90d1bf723b58c3bf6047f20e8ede692fb2 -size 174694 +oid sha256:3dec148ef5949ed993106677edb50140f9598eee39df2367b15b5deb277f0560 +size 437412 diff --git a/images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png b/images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png index 1a9e03a9e57cb5368185e7a1421d77f609a180ff..d8ee03d5000124c8ac325935fa36618155e77749 100644 --- a/images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png +++ b/images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0af5200a38a06c59a0f5e43deed351f08dab9f62879e9a3c32da711710020deb -size 2802190 +oid sha256:23ad4bfb7b3a4edc5228949fc0322a08b2e4f26291b03933cf9c1d626cee17f9 +size 2242459 diff --git a/images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png b/images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png index 70ff8071974b4d3b7f108e326d055ccb5470608f..21e412b8ea91098f9a668765800020652a3f09a8 100644 --- a/images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png +++ b/images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9539b6b5e7da02b669fda15b232a8fa8acb582a48b61b146d38616f4d3ec994f -size 993781 +oid sha256:7a29ecea470b8a31b5cf021c2d2d553771696e93aa9ad0b641d3fab37326ae21 +size 855955 diff --git a/images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png b/images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png index b6c153c75d8536f16c1f76df3386ad3b8a26263a..44a25e72f63afbb31af9a6c5f3a5b08de271c67c 100644 --- a/images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png +++ b/images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63f9ab0ead5f7c32215b404a1db721ffd28a351c0a919305af7038ffd1507ddd -size 630001 +oid sha256:91d8765ec6defd7a9bbe7d32a77b9320ebd5b3e51d441e0b22c94c681e844eca +size 626266 diff --git 
a/images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png b/images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png index 5cc56df6d085e0b3302a8b29e741ffe27bb28556..d13507f8afa0c15cea96a4c28092424b00dbef94 100644 --- a/images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png +++ b/images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d99bbbc375cbae2757ec11ba0b7387ef3759803dacc08421b9d9dfe940f831c -size 935445 +oid sha256:9035a57d3ff13a20c61e87721ba12db0d41955ec3dac3fb42453c2f444eea329 +size 941587 diff --git a/images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png b/images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png index 85ba19f2be6883988b5de8babaf8d0fd817d8495..86615e5ea12e16f0a48585bcdcd48e9e8d8bd3e8 100644 --- a/images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png +++ b/images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55622090a8b608c7b09b9c467f9904396cdd292bbc03f658419bf41a76605e30 -size 3174139 +oid sha256:95aafe49c886b2057b52c45c021931d6c65ec985d3076f0ca552588a63516641 +size 1201699 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png index 954b372dc9b749e1e982c064c084c65e17b9252b..5bd62a96923e351072fbd24a7831dcecff77a96d 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61203e6162f0976396617dc117857b7d56322e962fd54614eec068eb15f981a7 -size 7203180 +oid sha256:29bbaf53853a3e33e5042e8758c989f3e0682bddac0c7b9486d98ee20bf8c5c0 +size 1760299 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png index 7fa09cd75923ec35b14c4737e0818012e9b4117e..ca38761fb7a6c3b88b8e3291bf6c1f2197578209 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81d6f6d4494ba1b7ab43d62076611af13fa9b533301c468440a80ee4c6a71aab -size 908636 +oid sha256:3ead4f2a0efb41f25beabf426e0107140745d43e13cccd6f8eeee26b79934aeb +size 1230743 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png index d5c0e758fdacff71c082072d2d4b7352d770182f..bd26f81e3171eb502b3a737ff2fcee26afc25139 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a28e992c7ea82c428d4364d80d9420354c45ea13d2c3c9c73830c72eaa80015 -size 1456635 +oid sha256:06314ed9d60c5d1f853f9d6a94945ec891598f63008ea29a8857648d5538540d +size 1249451 diff --git 
a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png index 67b294ade344de1ca3c58d6e170b4ebbcaf75a5e..41b91a37a2966a0fdbc2b08261e63b257d14b2c9 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0b6c69e5031a16cfddda6f4e3980d31aa0c18cb435a0ff6057a815d45d331e0 -size 1856772 +oid sha256:1c0a57a05419865668347c511402d4cf679da7d83fa611a4a3c4f363cb4f12d1 +size 1633788 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png index 35f16b4a0398d7bc173bb095798753f51785feb0..75fd9ccc8f502cac623bfa9fc5b5ed0ca3cdb575 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7cdfc8d8d5f3c77c935a06f9cee52c03efa54a05cbe83e15ba5597f039b01d07 -size 1855740 +oid sha256:3ef3ba57c7ead1883173c26e683772f315a1c32a929d69a86a24be305d6e5394 +size 1928525 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png index bc1bbb6dbd7d6ce968593e80c6d752e744ae2699..6fb7e89d0c56d5ba80539658e95eaa19be1f7dab 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fd4017177098d9f6a03d0c87f9775d74a9dce2fb374539ec4d87749399b5889 -size 1398377 +oid sha256:316e5ea30be772c2769bf99e7844e5fbfab5e0d6df8bfe1709a0df4fb008fafe +size 1107661 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png index e043689a6de2004af279763235c92ca4e9a8ada2..cd4d585cee4f13a3d5c7d8d7a8f0fdf235888b23 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6303dcca7fed6af12f5fa1d50b575c4ab0ed8248b14b27669f5cc7d813cd191 -size 1452191 +oid sha256:540b7335e9c0dd6dc1627ceaf5f86d23e83906ac5fe1399efc78f613cc0b4d83 +size 1339591 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png index e9781923df2f2d4e80a8a3005b442eafaf245869..9f705cace31513e9e86428027dc93a54fc89590c 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64e8502c2deaad6557221433235563b88009d966dc29d73819bc80b538943f4b -size 6148969 +oid sha256:65fec7db2847e949513eddcbf32052c7b6924a8068ef23853b3f59c37ef93bc0 +size 1803312 diff --git 
a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png index 5f3c17edf9b2f4fd34adde1f067aebf66ae60099..b638e793a2f86902bbf9d169e14798af99e8bba5 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9af2b5900ef0b648eec54b64dbd8b8a6daae9c6ae4e8e4aa038eb51c70c08d03 -size 1368728 +oid sha256:f334837b062d1458bacabb5b8916d0b5feb77e699fd52563803e063f56e0a683 +size 1614509 diff --git a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png index 6cedfcc8d7487c9489f486d84afe71662484e9b2..2408147538bac8d6ea037a09ce8d42d3c6aa4235 100644 --- a/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png +++ b/images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:068f274028360e5773af13a93c70762bbf4afa496a4447d48af64608f8d96aff -size 986185 +oid sha256:3ca96ad7f5cdc6eb411d5ba89943e19d42c99f467f57026cd2dc3c3a91a116be +size 1420640 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png index df0d7f344f560ef987e47abb48c30ae74c3d3e29..66ea8e20df8ceb31151c5085ea8bf5e8b9b24585 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f0c25df097f8a8afd94c6806aec738aadb1f095943254909418f3075e28bb391 -size 1874455 +oid sha256:003f6f4b077f735d475ebb4414e8e74385e0b96c8107287eb89059babcee005f +size 1288992 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png index ba9403b3e7b51ce365e5a415238f6008f91be120..4a43c0ea45214f5d5796d7f0b734eaad60a89132 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1071f18cee71679ff01e1ad064941c2096cd35cf746b95f561ab507edcab492 -size 465551 +oid sha256:8cda4853edb52e2849da49f49ef297d704fe3c9b1b69543b61c2a6a24f6f1a0a +size 512500 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png index a04f339ea50063f37f52b1b43b26af0283a0dffe..6938a1e22b1dd9d315393911e549e8635262d963 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c9cdf12c8cac5db0f67f3624c99746e665f16d6f0c185d97cfdf506f84961f02 -size 664969 +oid sha256:8e09ef4a1ec4c0e591446b81735c8e7880939006ce413f309e26a9324156dcaf +size 934839 diff --git 
a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png index 2e11ae19e47066610ccd9dedfdb3350ad45991b4..230fa392468ec64616ef99ec06ae7a2206aa7a90 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e57ac2ba7b8e5523d3bd7fe0a442330989f873f5d6edb4800cf6d07380fc849a -size 1060294 +oid sha256:b36173bc1844d174aba583b868cee45433beb28ec5941615adf8f2263b0e469a +size 1672242 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png index 7c6289364d0b51839e846c710d3b02f6c664cd4a..59f2a0878bd33e5ee40e8d63300d673331e575a0 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1d38aa1152e3f30743ba0f45c02d6eb18bccf0ddebb010bfc4409f1deb69554 -size 645155 +oid sha256:06ffb3edc65a9ed30188bf8b13c1640db69a163871c4735def112cd59740c95d +size 817097 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png index 218e8a9b96848619f2030c71abca11b9d0af1ed9..2ad07fec2de8cf0511d99abd810a976bc5890ffc 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e53498215815cb0e840f107bcde2c31b09cecd3c4fde7c50d00bac40b59637d -size 664170 +oid sha256:b7220e389bb467935874e056875d48e7089268200ed71f3fe4b67b3daaa953e3 +size 567589 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png index 67bc728e5de1c045927b5e4fde8a481f4a18cc8c..97d6b08a164cfb573af2d621f0525f12da265132 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f157e43192c5c1561769731ae7596dabcfb68ac5f66d8a23e43fcb688cfa842 -size 644520 +oid sha256:4907ea74449476415d30a12ecae95ebdb4ad54e13558cd3b33fc1e7bfe5a31d7 +size 740930 diff --git a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png index ca9c80f063bd7fe80b4abadde66ab4ca6acc7328..5c97dcc920aa61c73c65549c70332409d5efac35 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bda26e34c0d721ca84a91cedbe88dcd9b79c58923ae6146b2827ef8cdaf5849 -size 645571 +oid sha256:8e91f98c1540e02462a144a6a8c48d8276890501c6d6dca9d5513d08bee78069 +size 464688 diff --git 
a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png index 218e8a9b96848619f2030c71abca11b9d0af1ed9..cc1eddbdcc552240403fa562e597664ef339e6df 100644 --- a/images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png +++ b/images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e53498215815cb0e840f107bcde2c31b09cecd3c4fde7c50d00bac40b59637d -size 664170 +oid sha256:d2ef3512bd7be94bf30763add39bd9fb6885b3b06e8ab1c266b281fadb4a8369 +size 885387 diff --git a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png index 4937c08b157437b3807a717de49ce4a0989ac8e2..7486c1ba248961c8d6329ea110ea985f4f9d723d 100644 --- a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png +++ b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c0f38ed9c4b55692f0afc04420f67117911fd44572fcb75ba1eefddf5ad9b21 -size 30730 +oid sha256:763d50cd5f82c0a01937595dc81cb466577b010437f6253711b3bf3d8f54fb40 +size 43788 diff --git a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png index 80ddbaaccab6361112dbcbc74ad28c70f13c99d7..2fd8e4b51d1397ad5d7596a1cfd49b0167082126 100644 --- a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png +++ b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6b5820d9017b29bcace5a3c0d9148eeab33ddb214ba3db6659a4d4679873bf9 -size 328913 +oid sha256:b5ce1a9ffabbb58ccc582ea9bb3e62c23e0b57105bf24cad443970b9b940d63c +size 338555 diff --git a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png index 23acf00539b0798a231de8514940f12ec4dae1c5..2b22bd09b8a3efbe679462097f9f428594b5a055 100644 --- a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png +++ b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a95e4fcfb6c53c7aaa1c2ae698126ab77b384af7fbe61122a48152d864d53f47 -size 264773 +oid sha256:cdb0fbf6779a7c3ac8a56603eadf3b67cbf0968b89a3b8a417c67e314303f6ef +size 92021 diff --git a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png index 10a39029ec5141550aee27e369e503af04852946..996592f024d7a58885fbcb9cf3fa8fa725aca1a8 100644 --- a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png +++ b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d070efb438f165428ce50f27f3259a09c367762102e2e227416dd2b3bb1c1bd8 -size 339249 +oid sha256:b7ca067f215373ef6d10a7f45d3e08e8df38b84c94374ad6f73c3cc83539c5e7 +size 257250 diff --git 
a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png index 0deee8abe5bafe338ad84f78a8773cb5af078a61..ba76440af1d04cfe1aef4691fba4eeef61b645b3 100644 --- a/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png +++ b/images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3a8c922fbde767c6dd73e8ec4cddf36c6174a12da2a22e8e5fd58793c17fedc -size 332572 +oid sha256:5cb0e9b5444fdf95dcde79b287ac4d9b734df41d0c98bf605e855b03ed86adaa +size 342159 diff --git a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png index 517dc777c732ac42d51224313f16cb253d9aaaac..4b74443810d50ef89947fecf26724e649621612a 100644 --- a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png +++ b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a20fd8bf5dd8cc65166230d41c6db6565da758650fef59b426c452eed08a472a -size 1423164 +oid sha256:67f45a313a40670095b1b1bc63b54f954039ad5466f2defe5a850417087261a7 +size 790872 diff --git a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png index afe518d184982e0b0a935ea2154a4a53ab2b97eb..b52b7ebdffd013f91acb0dd9dc54c3afa55ab3b8 100644 --- a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png +++ b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:097292f79a4054be8dc7d3d68a3d84feb739cd0aab6a76093fc8c7d336ec66f6 -size 693936 +oid sha256:9bd0e450da5964818b73d2ce258c2eca363974ea880686a300efa99e68052fbd +size 759807 diff --git a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png index 3b37a0f8650821c7ff4833598b3f7aa3aef5f092..7216c3713af718e170814544a95e0976aaeb6742 100644 --- a/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png +++ b/images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9cef82facdec12ba390b1dd77301f3e8287d67f747d21cd724fba65a27c34ed -size 716444 +oid sha256:750a72e802be78aa1a6469fbbdf21ed17c48adcd83e694431bcbf67b48b372d5 +size 464672 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png index f926127a305b30adfa19ad8622289e2101d8573b..78b3ac5c8074d0cf7d25a172afefce98c0eb7bfd 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7152927527ff570616d1902893170086f41daab955f1e4a764edc3fda8c76457 -size 723413 +oid sha256:cb44391f1d35f84392427d28b5aabe3cd94463eabf0734ea2f566c9674b7ba62 +size 792850 diff --git 
a/images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png index fe91ca0f8f2d3975bb586865e0a1774de5752285..79fc2bc7559f9c0e0bddd0d72cb638100e686eef 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81e2ccb247eaf3c7dd81ba158faf7d11d6b4112e1dfb641a9d1a8cdbf25e3335 -size 1001563 +oid sha256:c9a5b3d98a325c728a53153791518a960400ce2df80746e4b422f530edf14897 +size 1264222 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png index f3f8f6cc1138377fea4d987da82e5860aad28e4a..8ebf30f477deeb63dc04683441c4e152b4cf2293 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad5518c5bb184b939301a38fc0eecc55e003c41be1cd55c987a093adbec6ddb9 -size 872958 +oid sha256:fcb63f42da2d70eb12b8ad9c60514ce0f9175fe5ac9583a853f9103a2e93a770 +size 1331168 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png index 6ef9f081db3ff61481f68e766450ea4f1b00b606..a23e6251be029ab0535793d046fa1dc0be23a1c6 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79d81cd5f9c216317880502d19478ec17f11a742ca9a17dd843f21a452f3646a -size 660032 +oid sha256:ada87efbf5fd65940f12b8ec605736227fc3254c42d405341150f7f43f20b530 +size 1116656 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png index 02272c182b4e6a301885260995cf849ea9216e63..177bdc94d62a7b895dc5e2d4b13ecd681083dcf5 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4bc67ff90de55ff2732432e102bac7d308525469b5a4dc92542a171e7aa71d2 -size 701029 +oid sha256:3e1ec4f026e1f3f6014c5d8582f4e1c014ff17020e921c4b0ab78981423f4a97 +size 822810 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png index b52f6c11318059bdfb020537937d431ecaaa4b8b..1924433bd75d052ae4742ce896afd830f5c290ac 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:541002dc9d981176fbaf2447fba9bb0e122720bb57e52eb18ae6ed3afcb63393 -size 634003 +oid sha256:b99b11cd2a408e05f7f80f2c15c89260e6b977073b0ad9c4bb5e7b2df58dcac8 +size 656075 diff --git 
a/images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png index 015c9655cada15b1be96404c092af1e0a0d96009..53aba567db68a191f476f1da6d4eb1baf006922a 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcdde0993b8404c2475c8afd40ee864a835139c168622e741b37bb80e8ae127a -size 614897 +oid sha256:49be1ee8c520db6298a3a0cfb60ee14f4a26de8c0947642c64d593cf6451e8c0 +size 918070 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png index 0231d12c356d9340af4f120047b5dd10d89604e7..0c49e82dd24414c585ba2a2de1d3f0e53408c824 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46fb54504cfc7fe1383185e70235cb68d8aa348d1aef88718efd1f9db16623b6 -size 551063 +oid sha256:5250d0f17f2cddc6f0731d71c0062f4f73702493072a5092cc3051752a7dfe82 +size 823777 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png index 4ebb533eea1d2e275abe8b75f912782920029d50..bb399894f6bb6c770bba59a73772fb3990ba755a 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:632ceac1c10cfc342399f6c2ead1961ca2af562a940193ec6202c85ad200aef4 -size 804630 +oid sha256:1e67ccc8b4f296f2fc8fdcc1ab757c1fb045d7036025a15402e9241037066e7a +size 810275 diff --git a/images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png b/images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png index d74a6ec9f74b855a53a244fa9a78187cae24c58f..46fab7d7a38420dfe0cb38005418a4e3efa1cfad 100644 --- a/images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png +++ b/images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ce2ff545407f049a49f349370556d58ae8072f97423c4003a5e6786c8b7d48b -size 1560772 +oid sha256:2b7d80ebc8e5b0f43777f39fe9691a9b0f6f609e8d701dabd109ee664327a306 +size 1986754 diff --git a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png index 2322107e1b5423ca567931d8b1d4cfd5bf95e592..43a1249bc6e3c4fcb0845cf58f535a977fd5ece8 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39432609cc8c204635c6715da2055462a9d6421753c8abdbdbb733a32b3749a8 -size 1060888 +oid sha256:8de960b5184e38b3559dc0cb58fc93c8607854a39e30c6a961a086e3aac6ef87 +size 936650 diff --git 
a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png index 946d0416b8b659c4df4cfb54bb6e4a33e71b0e1c..6c50e6a8136cc1c0f04202126ceb916246816e4b 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d10b4898c62fe23e628192dbf24aeb88200b934ffab142de1383f6c35e38d10 -size 498890 +oid sha256:6eadd3eb12a791db3e9adf20eb13f8a96131ad316696cb8816c3edf76482a97a +size 1574885 diff --git a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png index 946d0416b8b659c4df4cfb54bb6e4a33e71b0e1c..1bb94050435b9b64cc57bd22dafaf510293ec879 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d10b4898c62fe23e628192dbf24aeb88200b934ffab142de1383f6c35e38d10 -size 498890 +oid sha256:c0d90c90c9a1d555a98fd7a468de520cfd399d4c6adec3dc0a080e66df84a436 +size 332658 diff --git a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png index c29c8cd5d99f5d649a363b5f9de4650bcfaa5b44..f02c637b0c0d26eb97cc2e52450147cdbf794f42 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:409827c6022fd258940d8692859b11276b5bf3707400fa44cc10e92f97dc8602 -size 1179241 +oid sha256:6848ea755bc5e668d1730c6f175ae811aca2eb9fe692c7dd9332203f22674fa5 +size 1259960 diff --git a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png index 34c546508c67a6cebb3a9af07b1ab1b2cb72dedf..2f40e0864ec021f5351cba371ddc2b7c0c6c87c0 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22815774be84c11923a0806be5238241a6648b35954722d7ddc300309fd0aca3 -size 1038451 +oid sha256:2fdf6666df27559716261580b02cb48677bc7b55cfdc4cc23a9c43739e4b1749 +size 1109537 diff --git a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png index fe917fa09b113270578400e146f33c8615663653..1ff4d01f08590dd5162858fdfde93c4c761c0537 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d4c2f6c7fbe19b3aa0a99ff58608ff4c50f1c5e12a66cbad6e42473150394290 -size 1284875 +oid sha256:e00535e4cdbd25f6a7428529475c87007f6c2490c3a736316c87a0930ac900ef +size 942023 diff --git 
a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png index 04e8960705e46ce5f1d47514c424de93e5f7a400..ae62eae714cb648effe7f066ebf0a6d12414a6d4 100644 --- a/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png +++ b/images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e7d4114f6fc4161712d30353ba050484ac54caea3fb82f11e52c7aa8eacb7ab -size 1182218 +oid sha256:ffff01ddac39b776c5ee6550728f8185f20e50c6a91bd548e9e78cfaf8ecaa10 +size 1181817 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png index 39b84aa8011cde4dd29d8b7bb94d76bf1f3094f4..88df61ac58d2f07c7f03de46c6be5cadbf2d22cb 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8084283f0cc60da96ed12a9ed805de591e2ae4b32e7deca0b853892ea0544bfb -size 784727 +oid sha256:79fcf180d2d538cd233b0abc8fcee62146a36b4f4c96bbc853dab4af338e448e +size 819397 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png index ecc82cfdf60dcbfc3a6c9767545ff92eb329a5ab..1c6c8a975c66da8e4cb94af713fea5c3649a3668 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db266d87d7e084d8ede4cef9585c42bce88777833d670b74ab9c5e852e4d278f -size 938854 +oid sha256:7ac1ecd201e41d48f1dd8674c8de7bd752cc4c286d05964c037adcf1b30380b8 +size 1456993 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png index 6a38f3f820289a422fe43b043c71d990cf897ac0..92727eadd63569d68fd9287bdfc7dc7915ebba7f 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fbe788adebbbfe16761f69f1c2d9619ea3304b60c6cd13fb02bd576483788f7 -size 1110846 +oid sha256:a5dbfb840e8b565880471ebfd763c33db84d01904245cbe1908523bbd752055e +size 1213694 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png index d37c5a4654dc055bf49a6fa134c12e7d481f6d76..684826ec1b467b2f44c765288560a627a8253d31 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ce97c6fd775c11776208ebe007c9f4baa0bfbe8fbf7669c427d3f1176969c13 -size 862977 +oid sha256:ffa549d4740c6b21afbe756744fa44b438351589f58716cb14695d331706f2a0 +size 1055679 diff --git 
a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png index 993630e147342ada2a3284efda4c612802d1710a..0c12aa389fe348d8526c7f638a38037dcbea2712 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca304504f334ce0c4934d6880885abd2d722da98820ed2cee71ae475ec4f80ff -size 862931 +oid sha256:d9e4a5389adb363102c47e5cadf6731f626ac0ae420559235fca5563815ad556 +size 1168923 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png index dc2e6c0d7e225bee30412eebab44c7d21274ec4a..f8aef1d49c3e9cd05123ea79ee454efc0246f106 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1b22a104e6c8b1f5c0766d936d89a8c11d19a9f911c14406e2cf0a6f12297f2 -size 888989 +oid sha256:3ef67c86746e61bbfc9925e96b0887f13a879f55c06e28c20b3ece36cfa5c210 +size 1123126 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png index 156606b1d6e39d50eaa2a40e1b0ef0a39a206848..6113ad5708beb64fe124fe4a7f48fa4dc0128d73 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4184f9ea45f8ede6ca4e293dc5fa4d729d86d1d1bcd966a7b3bbec1d8fb32d36 -size 1069732 +oid sha256:55450f88ceaf43e6ca6c8f3cc85615e25cd2fa00b15014af5b834b45adabb261 +size 829540 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png index 7eb31982290ab1698948e920ae1705c906e20f5c..6eade774fbec5cceaf1c16168d3563bde0178073 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f55bfcb40867ac881bfd4c9c021291cc89443ea4fd9b20d2a0b18068bbbce42f -size 1414008 +oid sha256:fbe1d98956ae1a8895f5c8a17925c02993aa96a07301099343d4f375ec0e33f7 +size 1118602 diff --git a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png index eeada24e22ceb244b65d1995846177849c25563c..c1b98d129cc271e6a919d711a82dc81e35e94121 100644 --- a/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png +++ b/images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72fdf2f4c2bc75254440f04ac563864dd71a391955aee75badcc74548bddf23d -size 688535 +oid sha256:e534aba29d03cf68c09767f896947108c6e8c7888af4e6bb1d2a4c48f789cf41 +size 1437208 diff --git 
a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png index eaad96d7ea4d5a56030de65bed32ca82774dca13..bb9c9856d86f1834b416a8b7d0e3c1a4e65474ee 100644 --- a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png +++ b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73b1cb56f4a2f63792a97450f6a2c45ba6251b83d97e55743300b95144d0f342 -size 751249 +oid sha256:f7ff8962100380609cba75abd2982a9c48c15e30e447451c9840e88b13df854d +size 1157056 diff --git a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png index 371e71cae9fda4faecdc6b55668492113e5e3168..f1f1e8e8a973df6e7318edbcb078341096451a29 100644 --- a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png +++ b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8dcb93b3b0737c339bb88493f780b7928cd83cc8e650e3f1c66838672c609143 -size 1459002 +oid sha256:fa2c617de9c7b7937b732b9ec5c0847edc289d946cbf1a2d61aa2001116a4f8d +size 2341331 diff --git a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png index 13171da12e68fdd6a2b10ff792906854cd5b8987..6c6a9c2b3ad0a0da95d9a99e678941567c78a355 100644 --- a/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png +++ b/images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1dffba2cd70d99f6247f79bfaa031cc3ef071e7bee54c3f1d9c2e58ea672942 -size 953883 +oid sha256:5fd4cd13295dedc3a075094fae72301110916c6bef76b3e8981a753ca7ffecfe +size 806453 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png index 364b676971aed858c86b930ae2b39555c0ab8920..8b580ec2b8c4ede1b0ad396458aa5d40625d55ed 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5db309f85d4e48d7fa494a84bbb4bd93665fb6cda24f856bbc87b2034678d6c -size 1258092 +oid sha256:fc58a2d5e6a7786caff4db2b2bc413d0d20ae3d543df5c6eb0e14e079e79e4fc +size 1238347 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png index 0c79a9bd3652a79db0c2faab0f3e8c0bcf80f006..5318efc5ae327205f1e6470d6c5a053c68cdb942 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d33f3228dceba16ef69951b14261712c98394bc4b7ea2ed0346df8ee794a78c -size 1543499 +oid sha256:85ed6ead24a607af99b5bd2966202deffe440da93acef1562114fa6879e95ef1 +size 1044770 diff --git 
a/images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png index 0ece0d88ae90451000808a92ed21d5e9e83108d0..0e0cc3c9eb0b58c3af4fe7cfda1c57af98bbcc2b 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a060813e0e981f7d652d659d406836cc32429d9be944c897c029da53c23089d -size 883800 +oid sha256:2e22337819a13960b994443e7d9dea5be405efd560b686118fecaf7ca0cf6a31 +size 1203938 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png index 157ecad06f33642d6f524a8f076cb6381ec81837..9ce4bbaf5c70c6010b60491da7661f0429583412 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a30a7bf658327f87bc84d5cf906c25c72dda2d9d62309c96c7014a9935d36f35 -size 1115107 +oid sha256:b811c8ae61a548eac84d2711ec3857e2e3c1654baadedd0f4feca8ac823a8945 +size 882717 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png index 28f633bca6d1cb3a1eda1c79cc748af071e378fe..32ee0a069bc9d3f22abe04687358bfce44ba837e 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:393e618d3615f811973fc6e9b4a552ce3d4efc1bf591b6ff0f115366341bad87 -size 704638 +oid sha256:d6244c65355199b2f27a886db5934db0a2d1252ae00cfa4413529d4b9f2ef9e0 +size 586485 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png index ba3c39e56fc28b4ee712224fe5af79eac4ea830d..32061ac8d3167419a0cadb8c4459646f59fc0afe 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dee5e39b4158562ab40ef1c35790e903dcb7382265d09687951b800ac03bb05b -size 947443 +oid sha256:be70e3b46d0ae76ad572c75078ca70718e27285d1e4aedc107b89b9dd33119b5 +size 1316775 diff --git a/images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png index c1caf6db8ed76d1d34afc3a8fde55dbc11ce4ca3..23ee4b7970e1d07729d56b28502f27e960d59665 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd37d8d43f04368fad9243bf450ba94e3177c94ff6b125ba4d6bb99d4b955a2c -size 1070846 +oid sha256:d5607925fd0ecceb7a2e87f841a2467a27a2f3ca69aac22586594743f5eb1462 +size 922308 diff --git 
a/images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png b/images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png index 2d54a65c12e708f3a2d43dbc7b24fd61c8ae7935..8484e0cf67835fbb3ae5cf234c40966de2081018 100644 --- a/images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png +++ b/images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f5a5a4a9a61eba497f3e4554d99c8d12265350703fa9b14aa6b511e147d1df7 -size 1175948 +oid sha256:d572235a2df3a17de721c876e3627f1d01ee5dbc20bc9fe1b014e1e9feb8fe64 +size 1318071 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png index b841ffe75875130c1837cee8addd71ca33318526..e31cbfcff8aa8d655315139a7d90d95f64f4c330 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2f96819924bcef1d4e60000d4854f400ec766b7dca7ce44510db2e711cbf178b -size 605080 +oid sha256:d8e7272ffe7ba3b69dc9bc3baab91363c770d5630fcfaf8ea391df367b92a45e +size 735509 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png index 0f5004c1a4e867102bfa2ad0346759d743565e13..506b4d49f78b0c59b73ef8333cd6549a8653d2a3 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4445c7256face233dd2aee23bb4a424c5bc3dc2ce54617006ed7c99c56a7a446 -size 609515 +oid sha256:533d513621303aba8c3c17e660af41de8efb9d01fbdf8182c02160e345dc6992 +size 481723 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png index 5cee0e82ae138be43afee385397fc44d69a5a02a..e06913e5d2a9a24db69841ebb00ce01995639086 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d61b5e89bf3762534100187bd29e70c9c32a1ed14a086223b820e62fcff0937 -size 607700 +oid sha256:f6fa85784b92466060bfae7f7f6247a3e63c95ef422bcd2234aa6a7aa2c81dae +size 355784 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png index cf0b59a679923ba0095d6a5db1a01e724f908f69..2391c300f521e6942fa158b1e699aa6fbe41ae79 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28d310899c0fb6e9b08eeda384157bdc7ea42b23170da7a0ceeed383825eb3ed -size 628924 +oid sha256:026c5be9bfe30f4e99dd31abaff3bccf13980d1a8d85947409021f4c81fd837d +size 1185419 diff --git 
a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png index 62f2817a8984d48af9e136eaa66c0e9469af08c3..c86863d7801354cd981786a1765ef8d7efbbf815 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:554384a5a4980a11d42bab4f7d0f21f8b52320c5a8e5e7959da70cec74005c6d -size 628906 +oid sha256:7041ca4300f51c4b3f293706920901124dc3e30fbf21e1850f9fb2d0bd7e4dcf +size 892019 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png index ce4b82049c6ceb2ff551303172ec93d50152baaa..6103a0b4a8b337ba26c1f1ffa825ae78efc79474 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60ca8350f358402118c1ccefcc7500e2df0326163fdb92228736a83a687f758d -size 330779 +oid sha256:bcddb9c5170198833e91939bcf22308e022f7740b290c306d706720061e6e685 +size 483175 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png index f27822c472586d7c2008e4b39f9fdceaf46745a0..aa564191590ad572a2cb93264df11466502f145d 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4b573aa361d7fd3af0e5f775d59ceaca6cdb461bf39657131f8c3a1bd0afd36 -size 604033 +oid sha256:6dfe621decb36ab43ddb028935a25b0be80cc668d8e411fbc1aa8914ed4be3ca +size 451023 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png index c9548ed88923d737187b474d64da85b6f71ad2ad..0a1768f5fc51ca6e50e14e1ea07e5cf7e607f30b 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc41d82f8ac6507e07dbdb08c51f6c27f86cc4fc91edd162180049ca544e9723 -size 613197 +oid sha256:032b077028b60947c70631e3dd88af28f432ffe0dce5d61d1b1e1ac461bddc9c +size 1088496 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png index 7071fe24d1d0ca993ca6ca0a7526bd09c67e4844..228d4e99a672fcee715c21e4139e9d90652c9c20 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36d650622220f311f2e161bb2dc5d8197c3f1c54481e3d5a6ffaf7e9fff857c5 -size 609034 +oid sha256:125be78959010f0b6fd2ad32d93e5d0cac21039d9e2b34ff983dbd0cde15b072 +size 640160 diff --git 
a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png index 836f24f2526c04891c180b08c7638b196e6bb8ef..a066e20d3cb7ba69ac2a5c1fb02473b376a44d47 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47ba5ddd4b44f0ff65a59b69dc07fa7b079c2eb4c1f55717862f8b0f9b76b492 -size 591059 +oid sha256:f9a423d3c3b614b5feb4f517057dcb2448afcb02112895a2a5d8417d63b304a2 +size 670359 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png index 9b744df0cba00636f3704adf4d8bc8305862300b..354e6269532c871610306dec3d7da3d951666d7f 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f179ea88cd864b18cfe92160c3aab6a80b145751c8cc1aaaf298a6a149e295b4 -size 1787905 +oid sha256:532fcf5013ebbdcec40e77dc94fee8fa8c56a80945f3a77a2fb92d09169e2f4a +size 1750257 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png index 5dfc301bd2b17cfbb8858c971231f8bf088ad417..58a2aa31c5a00224862f4cbe63a26eb7080b1bbb 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46ab688851766d03955f74e963414185e54e1a328dc316609d73dc863a820542 -size 639799 +oid sha256:2652c1850ae87af8033a955ab8b30f595072ac09b7969b887b1f5fce7ca3ce42 +size 526021 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png index a8a80cc6868f3efb80a43279966984ad40770397..b0665647ef51f8db896c17052b9735b137fd573e 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93966e23b162d6d6d2b05f82ca15623a1b06600084ff8f4d3650f50b3cc827a4 -size 607907 +oid sha256:ffe29a2b300aba29de6d7b8d14b750e41e453a290efcc3b186a96551e5a2d210 +size 1111959 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png index dccebc3d622af46e7d423475af45e8249ad9bb87..48b1da46aca7c254f347df6439784c2ab321a05e 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31e1abc89574614abbf338b154d242b19a8f29da388300c86a1f932c756c49b6 -size 613248 +oid sha256:5fbafaed4b52e791e4d2b10df793730086a6a527ec34d39879159cec65c9dc57 +size 972159 diff --git 
a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png index 46b8317c5561def34dd4806e4e9dbe821e87dcb6..ff4e868b700d971d3611841cf993e96fb46e4389 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd920a15c155c3de1c20263f35351781fd18cdd17b8d7e0467e56e7ffe1e7ef1 -size 1466533 +oid sha256:3174da34d4acd02139315dbe829d03dbef280ef641881023b4da709e901c2df2 +size 1435304 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png index 00b945fe5893069923522771ffb9294378253012..2b972eeb5d46b5079fbb3657a58555dd359c8101 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ea3c5ab264b7149a88786ac336ae725f651521c3d0e6cfc62e9a70d530894e1 -size 1346492 +oid sha256:34d953aa3ede7bc3cdda483df164b64b610a8f75e728bf0dbda9e65fb8a95bf0 +size 1675072 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png index 92dbe5f3990be1e25d83ca36826de99247cd8b77..05576fb0f69a5b1829cbfaf0bf19332134b050a1 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f6f882b58be12c9a34b44651b085ff4dd94cad2c596c39e64746f339bbc24fe -size 1678519 +oid sha256:b9e5bb211a0e1c453c78167314b6ade1cbdf1354464f3d3c00ca055b5188dd04 +size 1898248 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png index 1d33b35bda267402338f6a3c4ce7420605e2fa56..13f569749304393c9c54a73c20816fff8d01e0c8 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9a4f04aaf07b26b9ca37001f6f68993aba304fbde3b5d7c7f3c64997239b410 -size 604314 +oid sha256:af3347874b585096d803a953f6e53ec51de2be5b97e16c2cc5c150e9c282babb +size 1168441 diff --git a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png index 9a716ef0ef488d32ffea427c45636325572e4e5f..0fbb3a014c2b92a83366b18514097b10122f842a 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a794e46231bc238db4e5cdb53a852de0af81168c7eae907aff105944ed44eb7f -size 601471 +oid sha256:8ba57d010029d2524400ac1015d4e21e46319c921db654c5315b995dbce70009 +size 495048 diff --git 
a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png index 9840264c38082ec6e4f66755061bc89b88aa5296..f6d455b479d34ae20ad0e1e3e8d0d997572991a5 100644 --- a/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png +++ b/images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ab940c0b1f38e32d323226e179ff7e845c9124f5a4273b715bed226c24979b7 -size 1579695 +oid sha256:df2f68f88cad2d6c327478a8382bf193003b178631d5ab3484a227fef1f44757 +size 1279641 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png b/images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png index 025750f5a645653ea5950c59462e040867f84215..d748af150ff1dd2808a7e33c0f790e0532158122 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61f84eb1bd60858f91f52e48cc8602a192a25b8c249d308911beba9b28373ea9 -size 1373107 +oid sha256:cbc6c2b739c6144ade99e1cfe6abe8a031ba165a76712e074d36bcb816fda358 +size 1285461 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png b/images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png index dfd3d6a6bf38ebe91ed6fd69ac7d6a4d123cbea7..0e10f1b7048017cff834d8587408c0606294a03f 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d49ec22cf6baff46604659c9c379a31793bcf4d80a16026afd3af01d0599a18a -size 1364750 +oid sha256:60ffe501ceeb4e9a1c61e8ad166ddb8efcf035bc463af0b1ebcd71ed2ddc5f44 +size 1173958 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png b/images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png index c53f95c384dea7711f50b2368b6958022c0e1696..a746672278a14802b2f4ef5573bb471b74177428 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d004c7a7a7711f2d3d9839b5985ebe1336b26ada9537f76b2d42a7ee5ed4c6af -size 1408351 +oid sha256:03bf3f981788925eaaef4d27fee5992cc57533832c2c8ead81ec1c79be130262 +size 2620023 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png b/images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png index 54709ea094ea602d9114ad0bcf187e704f9cde35..b8fe81f6c25bfe555ec06abbb8447030524ed642 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f87848d6f7af68df15c7dd01ba450c8b587617d89913c595e896bbf84d8bb214 -size 931170 +oid sha256:69cc17c6c9b9f5ceb4c0dc6d251f53d95f9aae0355cea91c266348d70d038037 +size 969195 diff --git 
a/images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png b/images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png index d48f964dd464346f66170a65a8f9132ebf782ac6..62a48ea2d609fda14a56fc0a5df6c3c437162edd 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6f30ad8f53b4f64c76f4611a5d877785057a28e382c349fdb62477edcd41d2c -size 877376 +oid sha256:2520169aeebc5c6b5c31c798ffce9a302be9b7b1d7a5304e4df73c2090913336 +size 1023860 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png b/images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png index 7ac9059f70b0edfaee5bd93d9af1303fc6674d5f..209a983700e4b6c27d2b022b71b2bd452c17e4a9 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1162f39cfb84cd54ff0f1744f5926e31bc83dde1cf4d4785429ada9218ef5a01 -size 1372135 +oid sha256:a5dfa200c77eb672b7c1dad7561a3d157fd5d5d9ee487fae805263e1007ffd39 +size 1913916 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png b/images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png index 38e398dd458fc98556a06ba40cbae50b12157c51..76a3ca0c3a2fa7839228a6c2eb8997d5ccdea53f 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:08cbb45743abff6d99927d3846a8856737093ee3b967f645358726cf5e8506e6 -size 1908597 +oid sha256:fa5f1750e53e76dbc30926b0d158b8acde8b52840d637f077ab22d54ede30207 +size 1536371 diff --git a/images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png b/images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png index 21c22e6272dd2573b1078686a25d4aac4706aa15..32329f7bb996672edc23508e87c7c876b3f79f61 100644 --- a/images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png +++ b/images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f9aabbe74b6dcd1d858cdbfb6350d6d57cedb7a40665e54a59dd146f39a346a -size 2407966 +oid sha256:ec5b46d1f343a2d8a8cea4853097f365e6a17becd4fca8c6e357b69299058ece +size 1001518 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png index 7c55e05257f5deaf87068434d3dfd571485313e3..657e08716b492144e0a3b902fecdf4555ffa8d27 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a05435654509a7a6a9b2f852b6f37fe5bb841469429855151cafe6b41d924369 -size 1309674 +oid sha256:f5e8a961d11227835ba4803ef1b9bcad293881fe0ae070be67b90ddd9f82aefc +size 402945 diff --git 
a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png index 7a113013836a2038e3afeb35f1246ceedef9d94d..00b81f0866bae2ba41b10fa9ee8534ca0333e410 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c41f565bf4d47fb9c85e8959491b319a9ad8f0cc4f709e9b17fe728e4248764 -size 367680 +oid sha256:ccc35baa134dcf1d6f4225c85188f11300f2b81403a6af200e5b2cc545dbe328 +size 780565 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png index fdf5bf5b94a62e82f2e280e9d8f47bee34b6b161..eb27b8d1be7224ec8fde50848c597dc182c3257a 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4952bf9b1967215dd5f792f570a037cdf6f2d8ffc05dd5dd7b3c146544178dd2 -size 5016760 +oid sha256:bbf127d4e5ef20d876d63660f7d7433c563a6bbd9bec9e16ea59edb867436fbd +size 232942 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png index 4735fe443dfd8ab1dc76abf7df997a45179dd4e3..f320f21f4e91ec4094f20d7d8eaf1f6675d2d9d8 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad134414e87de64382bb47de69a4ad7c712239b5d196d50a4bb270d5f34b6c09 -size 440203 +oid sha256:c1e882e2df1341fb2c485cdb5e4b59906146f4ca446aae585e61d057a834f7a6 +size 474186 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png index ebf15956ee0455f4c931686888ad5ea51c289556..b5c6c718f1c66414dc7e06f2862a0f3d45c8ec79 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3120143aa2a14b97ad92fe414ce3b6ce040364134a2f1a68bb9af49c4214e670 -size 2374877 +oid sha256:7e709f5ef5e7de94bf074a15d985703d5272b51c939a97e9a47e4be59d9991bc +size 5468759 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png index f739e96cc5b9a57dcc7ad354f92e39a8ade32c1d..bfbb500c401c709c4188f09b787bdcaf17100e2d 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0a01dd717c58ce76dac76bd2af537b195031290425f662cbcaeca1dd058cf551 -size 2860708 +oid sha256:b0aa795cbe2296d19db326ecb0186961e94868ce9db36ba66361ca0cf95fa1f4 +size 127035 diff --git 
a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png index 95b04a18fdac163bb926ccb3af92d177514655c3..2c1a9e9140c9b77f07b2d4756cfadd822b838fc6 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9058c42c93be7e6195760a4ab1e9d86f39a1e8d4b6df78542c1a3e593d166c6f -size 3988873 +oid sha256:cdd8bc92bba6ac43d823c3f08dea6ccba02679e93238b01d653b2eea39bec4ab +size 958793 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png index 7d162deffc687849243823fd4286abc22350c6a1..8bd3f0ebddbe7236c386e6ab6327e22812143b1c 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24a96a39f39f9f3005391989d4a093f3d8e15b35c8ec5f5f3413e47bf8db873a -size 425996 +oid sha256:00ef1c4fa37a57369606c00ad4a64b87d15b1520ee50d77ad850de54cdd60a0f +size 359794 diff --git a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png index bca2eb496c1566974c09133d36068180f4e444fd..57fe443cc0cabe6487fc3ab0e81a49356eaec79a 100644 --- a/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png +++ b/images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d9c87903bd27d24380e5f5e3e018fc65eabfc364ed387f8fee9b4214569ad12a -size 386046 +oid sha256:49632d1887038bec96e4c4d164c2f0e9f52ee8737320657d245759c3f7f07410 +size 647061 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png index ce2d709362bfce9577bb579a276de6d5a6f1c7c4..5e24202f2bf4de65ac91f12111a461a69ccf1355 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0a12d1d92c7e6700df7f0fcbbdd324b517993f0b452ece50fdfe278e22c9374 -size 1267472 +oid sha256:032a1dda0e51ee0ce2ce66e9784b331a746d82e4cc25fad1be2367f44fd09898 +size 1857073 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png index 1db2168a58faaac786dec7fd8ccc27beb407517b..a36e558d03377e127c88543376938d432c1b272f 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f95d21d28a4b0ef4d669113cfd8d3237aee88a1794df7c32d13c5389a664f89 -size 1220054 +oid sha256:745da0c80ece7a991fed1e38ee3ddc4fa44c4ee0c3755939885641e376333a31 +size 1744243 diff --git 
a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png index e17bb5f1f7c5be79df67099b4203c4245e22b894..694ca534056e27ca6205fbcbfd98f3b144c9ed62 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2eb906a86a11824b1cef8633372b57d4a22c2d00839cdc3a62df8fe9ecede3ea -size 1452300 +oid sha256:35adff03289797a918f9713c5f57a5980f6f5ac9e45f0f7391b01e919eef615d +size 727354 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png index e0fa7bc1f166cdcea995b12b179ab7ea1be4de7e..0a033f8fcae2b4cecaa3854005872b13396997a1 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b68d9dc6423fd125f963600e62bee4a95677d9a755875e0944edf210a549043 -size 1449476 +oid sha256:cf444a4b10eb1bc3a238cab8c9db10bd8e9090acc88e9284316629ab95028c9b +size 746772 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png index 61b4187b76349891a2432de5eab4312e099c522c..ef39c80794c6151c6430cff9af0e59421629b212 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92a57936bce8646e17fcdba42c1e02fcb3610bc8dfa8c8d90de42d1f52ce8e64 -size 1495583 +oid sha256:bf37f7cdcfd95c824a0aedeaac593d46f284ba9791d060c0c3d3634f44a95bde +size 840329 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png index 3926e1d420816b0587d92e05842307ee5f6954f1..c9396f0470d10321be6dd6fa3f289d8601cdbc61 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03ee111f5ffe0af8bcb855deaedc93f9e08b88e9b4128324f2cf9f850f900704 -size 750803 +oid sha256:d6dbf7f5dabd7b662903f0c2cfe299c59309a7554b8f69fc8fef8e39df95512b +size 600453 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png index 8a32478674eaa1aa97651b90657b8744050ce732..c401c3055950ca8675b9c8fbf5b8555f33286a39 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a71b008e5c41b80a7918c64555540a8524776441afd031536676b39b8ed10bae -size 754064 +oid sha256:ca023ca9c04e55a124b09f724b414a2f41a9122d310b737c354fcce59103ab8f +size 992626 diff --git 
a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png index a931099a56bf76055feebe2af8c13a2ee546f01b..c6a9407334ad885acaed8edc4666bb4e20c713ab 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1ab1825046cc9e20b394001b64a15d8ee1bfaefa25b98c312779ab02d63a47e -size 1258562 +oid sha256:9d933702c82b81953370cc1b90c23aee871892071bd726f4c5201f8b54a2f1d1 +size 1051576 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png index d2e169a6897f685b9fb4e1f06b6f2b923c95847b..eeb9f20d85a040fdfe0452e4c8673f3f6cabc53f 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e1d6053346b93c3187ff743ef9d6a10c72ba2641cc14a8cb90095400d5f33933 -size 753822 +oid sha256:d660848c71ff9318baa1760e32a5b83a0364ed93363d2feae90fe40de6917d89 +size 1604360 diff --git a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png index cdc53ba1787b732e7f2b66b4c29f0b8b564ed809..fbc91c018a2f0fa27b00415fa843b7b7b50124fc 100644 --- a/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png +++ b/images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0006c8f48a9b17562d7bb5220cfeff19ef28298588b1f4cc90514027d11dfccb -size 1402803 +oid sha256:2218381626cad9748967a686f667cdf32a0df626f87168c5eabdd1e5b3384812 +size 693496 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png index 2565765c92502ce75f4977b63d3424b057bb03f5..17259e5d3f5d05b5c0b20db3cf95af111c6efeb9 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60b6eafc50e258fce58561ae19dff9744eb127a556b82c61e4a47e373683be39 -size 879911 +oid sha256:fb1ec40b6f89b72f6af78f2f52ad71780daa53deb6381da7cf9748fcaa2674f4 +size 411877 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png index 3e79c189d1718579c18c7b04708b26094dca814d..bcd002614becc7e463560bc780c0500bb1ff2d50 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31ae9b9784d07690c5161f31bf0c6d56cc2f765ed211f219c8a351aa65bfcf94 -size 561330 +oid sha256:591f405d651fedd3016c9eea2203d133f0df53f7b67752f8e85b952020d01354 +size 411550 diff --git 
a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png index 3a43700944da0ef22340dc3f97ae5c39077ef0ab..88f89583cf3262f08b1c44f3b55a1437ed56236c 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fb4d0e7b65b1b7f3bb377583d31874ac361438e55c4b797ad80498fa542c74f -size 412254 +oid sha256:1f825d4d2a57a6f04d3fc7bee4d5a62a39f185d2d3e0b2679e884b6861707cab +size 511956 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png index 64e1b9b18905fb5e86432d3279fc389b94a3aca2..06c5ca057ac0b8e31d448578d30d15b2f7f55635 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38ca98ddabd5a68a3fe57678253d78acfcf37bd73ef00bb46abddedee853d0e4 -size 418593 +oid sha256:7172dca170956bf589e8658ce2b1aebb265788c4a7a88ee48fd20f7ecd3ac150 +size 577953 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png index a4ba114d90cf9c985db5942b0aec45ca6f10e419..891d10629940ab8c2b4037b1d83649a29e474664 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0796850129c64f9459c029ee585eb58f3cfcda0208e35e7dadf4e2d7adfa964a -size 1217457 +oid sha256:36d2859c26016a763fff794b050df99b36acbfbcfd681bfa7ccae754fc3ac27f +size 1109294 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png index bad3796f2c4aedc698fb952093b3dd0dcf89f421..8f0a22aa10bfe1dc13b2001af630ca8c7bde00aa 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67f98e37936d4694688d5c9e73cae0791a94477482e383f0b4c5a3ad66438823 -size 1154716 +oid sha256:a8b8325d5b0763568a1ffce51bf6b1ec188f092b4ae11d469fd170756aef5479 +size 260337 diff --git a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png index 17e3b5089e8ebca59e5c677f4f9b416004b78ccd..6de40f3bd84ebd9086da9b1fb5911cc9e7e2f80e 100644 --- a/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png +++ b/images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5d17a9d028fdfd72f4417b583990e865292e99e42fbdf149b6318a3b600cec4 -size 412336 +oid sha256:99df4c6d05b7a631e56d1ba58226801e62b782c0632423fe3918c606bbcea06c +size 672040 diff --git 
a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png index 699b25fda85ff4c66caf8ac79f2718a6180ce839..94d13be0363695eedfe8e73c7f9e7efc723068c7 100644 --- a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png +++ b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab0e2d460b9d69813cffb88fbae8750298176196a812c9daa995e7004d8ef6e8 -size 901070 +oid sha256:a244e3a7adec399c3d2380ac07d2ef20c7bceddbc252eb3c914fb7b756f39b8b +size 897457 diff --git a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png index 56982075f81a164cd23a0925855a83dc5552487e..24ca7429f9adb21032ee24e401cf550bc9353473 100644 --- a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png +++ b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:771047bde9277482d2f25fd1b8ecb6a843bb46afc5e5e08b179a713b1638c253 -size 556113 +oid sha256:87cbea69e6dc79876bbc678210e85fffa8b2e6b4782b44d6c787df04a2b26dad +size 551092 diff --git a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png index a0eb2f5d53647bab7dadbe6439791813fffdeb46..e4f6c383836d700a928c70a8dd865dbaebf35622 100644 --- a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png +++ b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:198130920799630c128eb55107ca2c2ee22072b9f07b45a585104b56b28eceeb -size 645793 +oid sha256:7ad0fef523a422b49f70298c75064f605a8e271931cdcedabaf6616dd3f3b9f9 +size 751284 diff --git a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png index 5ef56cb1ce07622c60e464b8c959044c976a8dc2..8d787a5188ca6910962d34a930cd18223d69cd78 100644 --- a/images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png +++ b/images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d6288e7b25d97c9b1469688e3833b97a201d57889911986247133ba65c65494 -size 551968 +oid sha256:91c1f4b4ec2f745f184afab8c59f06294771bb4004d0096ecf7d340e6523b3c3 +size 483022 diff --git a/images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png b/images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png index ebd6943a776a50a0bc9fd33d9901781492cad067..a75fa85b1226e86c7175f8cf19081270cd5a8407 100644 --- a/images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png +++ b/images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d0c5542e6ba8e55d4f7318ccb0508579c01bf0ca66d4e7c4484813ed6f083c1d -size 1278836 +oid sha256:c24df524ba0a6384f852ca7aff0e654a3490d8b9c2cf673f005e130d7997ad45 +size 1254690 diff --git 
a/images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png b/images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png index 2e60e899c052152ccae17c4d257993ad15b55e43..02d9811db368c4c470822635a37cce7ef7e34104 100644 --- a/images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png +++ b/images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c11c7eb6c5f8edd797a177e7203338fd89714fce2265f3e96f10e3f9bcad0586 -size 811641 +oid sha256:b6c051bcffe86a1f210be61825911011783056b01b489517901aed56d1b97cde +size 800115 diff --git a/images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png b/images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png index a50189c7c91dff7420fb86ff777a8d57cd7379a4..e85000ce0577bef7a8a521bdc6afae7f11b1cc78 100644 --- a/images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png +++ b/images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:24f3d3a017d5a797ad52f08da8b7918fc7c8e6ea73a457c3201a63f5c0d3bc07 -size 698955 +oid sha256:1bf5f26df9f9df5827bab2cef348e1df6ac0d509b4ddcb94540080d2203e3375 +size 1240968 diff --git a/images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png b/images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png index 09fd76a9e0c7d06d44f35889a28c3df197228500..1bf67c83d98b4b5de53c17d4b2db1ff71403fedb 100644 --- a/images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png +++ b/images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b218b7db407f57a33313a1ae5ea131c07681a4df66ee3638b36debc6d90cb42 -size 2400965 +oid sha256:ffc4ffad4648f1015e227400b5307183482c763444f12a69a4386254adb87888 +size 1496400 diff --git a/images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png b/images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png index bed2de385c5e270c51bff9d2e879a6218ffb8d86..691a3522ccd0116d45bf92d2a380f923112429ad 100644 --- a/images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png +++ b/images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43d01ece209f904196532940af63eb2d66647af64a7e510f3f526caad6f5bde5 -size 1879189 +oid sha256:2747b65283f980e9e0f013e3bcc99113101e3c357fe55c777cb89091b3548510 +size 1616543 diff --git a/images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png b/images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png index 1433a1b0beef231e21aceef7a47f3d592fa2b6c5..21d47de1f2040f71753ab4bada8f1572bf26e64f 100644 --- a/images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png +++ b/images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:030bf5170724552ab1988ecd042e0d15463615269e2c6189956cad21ffa07785 -size 1737894 +oid sha256:5770972c8d0ea76819d6d781afaff35d1829a6f52e46b22b79fd1f5f48550e87 +size 1099788 diff --git 
a/images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png b/images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png index 4852798078970ddf2d5cede7d55448a909849135..69cf426bbde175f150cba239791b5e492c607066 100644 --- a/images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png +++ b/images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e6ac167567981b129ebcd9ef1784ef19ca66f74b4311ca0b06c07c69b603830 -size 2377266 +oid sha256:a6a3fd68646831ece4e324101668da2fbc1d344185f3cb1e13a0c29603d0c20e +size 2401416 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png index 128684129c5c7ac024c25b1f7b96ee676a287807..542249616feb0374a079481c5611e8255fc5f335 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f9d6ada0426f0ab95b32e058847408bbd1c7c9e2487e78962f765623946d064f -size 1834267 +oid sha256:056e9e52092ccca3ac2e4832e3a9f094c3654fae94f0d2416d0d6b477f18ce4b +size 484649 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png index 85ca1d07d6f81a02c255d5f5996c32ccd191d0ea..8946961f655a96f43f21ea709931cac91572f1e4 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afe9d05e69d6b2c538976166cd7b3be65c40720d3b86694711562df44375df22 -size 615884 +oid sha256:7ed08078901c39529411e5d1f9af07a1c055196e84636b445bdfac2f95b1369c +size 584028 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png index d8d722665d1bc8b38ad0f93bea42267fa326cb0d..8fceedb0cf9de47b19d9b690af5e7597474ce720 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:686559d8b65a66a6ef0ff3112d2c1714b6dddb3e0e1c95fc349e45bfb81a5579 -size 652323 +oid sha256:9f1475f10ff8154cff074e94827bbf6d476b9b202f331bf629a180336878d863 +size 674414 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png index acaa34e3e089295b222fdb85178a866f2d3c49c7..9b0244322ada2b0a74daa3eee20c41da09ea50c1 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f6909641b4105ce368a31480e78b0eedfa25e42e80876e3ba9475d4a767533bf -size 811213 +oid sha256:f17e35e3764ace2682b99b01593880b27028ff1512d0b154e7409631c7a34a14 +size 433547 diff --git 
a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png index 1f848ef0b583f3c97b35b636017df7967daac5b6..3ef825db4c124fb590be0dc6bff2520440767c03 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28a8530c900a4aed5832a1489de5192aaff797dd2b7add7a97709bf43b608e51 -size 646313 +oid sha256:e593cf01ae5f4ae4998bf30641c8953473a0fa82f513f25d940e51901ac1d499 +size 673805 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png index 3db4a09441f4fd904ffd89f4d9cb8f7668b0d3d4..fd54bd5fae356842d035095cf9bcd882aff34867 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10a9df217584137f4e43f84f13fe937df89a13528821cf70b946b8d242c71db4 -size 1336852 +oid sha256:ccc2dcd35071a9687198486cea229486508829d71b49a0a3153be0e19fa9090c +size 542760 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png index 0958ca4fd46a1bb8436a02b505c2c6f8e2bbad0b..1617144533297cf830e824a845537cddf520bad5 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98482d588b382c973979226ee2bc97fed147262cf43abdf10eb28d9efb0de333 -size 648935 +oid sha256:00591b262b207da87acd21c3cfac663fe367ceddaf2370d02f0606602d0997c6 +size 684517 diff --git a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png index 5249fd1e0d1d418ca5a8eeb03db301d55444fad0..2f5e371a7e2cbabc3b4ad4fbda1b64d6b8e4391e 100644 --- a/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png +++ b/images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d1a16e8f80e2355736770f59eec94a96a346c4ad7e612d7edd00c35e08ae7e1 -size 646053 +oid sha256:f380c0f140ff79b802720ff962853918e7cbd65bbfb69a204f0acc981f30ccf5 +size 338758 diff --git a/images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png b/images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png index e398a36d1da4d14ea8de5ba5cee96761b1c637f1..53dc9179d7dc3745ef077fcc54dc98e739fa9725 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f975de72b644bc04859169fe42b683e8d279d8812b00e8f0643a645fef350cf -size 696766 +oid sha256:bc6bc2e4eee36d56f2c8ac21714133a75ddaa80ff5fae50992681aff942af484 +size 541101 diff --git 
a/images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png b/images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png index 55145142bfd5f8eb7fd244bdc77430c4eedd91eb..359a07534ec4667758e502638494aa1ffdeb8255 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8093daef898a3d56075aa3bc418ff2d58ed9f19a81089e27bdadb6f48c9c345 -size 800852 +oid sha256:ea6b2f4b4c9937a722853ee2656270235ad110c07f4e4826517b9c9daccafc61 +size 538060 diff --git a/images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png b/images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png index 682bde2d9973a34dce988fefb40d71ebb3e36d87..cf8651beda1d99462cc4137e1c283ff66f3b2d63 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6780928ec9d0ba3ae92e898cb604d9d381fdafc6a3f2ef5c7eb43e0fb136b53b -size 928935 +oid sha256:20fd950b3dd4e4fa46c56b4497b42ca25c42039a270d35f3dd429b3ecc41fccd +size 871837 diff --git a/images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png b/images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png index 8d9c12da2b65ea243eb06d6e8e1cfd70c1db9587..a76b40cc721bd30ebebfa88b2d294444a4893129 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd1e64876d4e0d3d6eb4c9b326c342fdb85755186efed2be8e7e5c06e391a6ef -size 789230 +oid sha256:822071f0f26515d41c4d0fb8debe0ab3d03f2456a88a5c3f2d82f29f4f4622f5 +size 704993 diff --git a/images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png b/images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png index 06468ec3bbfa8a8d5565586d0657564c297475b4..7b9d3f287d7a275d7a94ce1b755d21bc48ca6e40 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb7e11241ad51f4ba4bc17121ad45079a5a05e10ab6500a617d979f1e54e35c8 -size 990026 +oid sha256:063a02292dd8e794656f815bda14ac351fbed240553e3d84d5e6429f89c4f170 +size 1088741 diff --git a/images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png b/images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png index 85d34119dae0722836374507bd5f96932111932f..7679cd2d6c85e3b7bed5a102790ff9f651ba7919 100644 --- a/images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png +++ b/images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3da7f222fe799231ccbb2ee96092e43ca563b7c11a2c97238ffd98b70c11cc5d -size 852890 +oid sha256:4eb4d7327a6468c1f5f43c89a3f6134848a96c507bd155c7811660a1ac7d6dd5 +size 655376 diff --git 
a/images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png index 9cd1f1f744a93e73639f8ac20ea1eb2a7e003068..3059501944fe2ade29975ea66cd7d6e9b5aa74d0 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:942428302199d81383ecb4a0fd055a89cf537b254ebadb83d006e6d0550202a8 -size 1054329 +oid sha256:6e330f15193fceae4e7358086852fbbdd3f3a5fe9f909112fb544f55d9a5b058 +size 1088997 diff --git a/images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png index 818eb953ef4850ef3f5ec7f5f2fcc0c0e61ecef3..2198f98a4acc2e86a7a9398d3611668229871e1c 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe055dd7676b5130dd955e723ce4b3a18d3b961ef3dda016d366a6e213b1b4a1 -size 909910 +oid sha256:a28bc14b9121aa09c77c098a929f9948dbe7631246bfd28f03547fa21e46ebc5 +size 1264988 diff --git a/images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png index 1278fea9f6e56c54d4e081221080704c40b27e7d..5039754b8a6dc0f2e4e5115db2d563948e01240d 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e22045a9fc08e2d19aec0b69994c28a8f82bc9414f31dc986b1b6260ff693d14 -size 1514021 +oid sha256:0c511698e4b6fbd066644c70e363117f2cbf28767cf0a6c3250d5bfa2932bbd9 +size 2056489 diff --git a/images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png index f861c138b706735a4896f31fda28bc67b3ac6144..df84d1a31cbda6c5eb1140c22cea7236c82d209f 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59b6ef189bc842e6c57026877f5e60857bd18a58cb93000bd4cb297074153200 -size 740852 +oid sha256:1200cc22b3c61c559091e64cd644bdc18160d77f94cd794a5e56d95df0d53419 +size 732818 diff --git a/images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png index ae99cd9fd99f5377df7ad9cbba9093025227aecd..3f0eb4719b237a2acb9976bc93c488cec5375059 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b237b6d077641c663b434d9ad9645dc2517a50784a58bcdc8d603f2efb7847b5 -size 950076 +oid sha256:7b0b246106d6dffaef2ae64f0eb15b3cf80add5e9943e4f43471f86ad6c2e569 +size 1123317 diff --git 
a/images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png b/images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png index a37ac30ff84ef1126140511d3f497517ad0afe09..a09cf6cde08b67109e2f3fb27ec12233e613ad1d 100644 --- a/images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png +++ b/images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5112d5c737c9f3f6e6df91599941831b3ca0a9b9ad42b6af83b453499d5d82a3 -size 1237537 +oid sha256:272b024b1f24160d7e90188fa78a68ecbbf0c9f17371356f6b255117f83fcde0 +size 784644 diff --git a/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png b/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png index 584208a331e1756882fbb608abfe9c65bbbaec9a..f76fa0e289120a87e25ee7157f50e627f39d0740 100644 --- a/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png +++ b/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0cfe1cab01ed17c7e35b5950bab67df6648abf3c91a273ebbfe79cf04a70270c -size 495075 +oid sha256:f5156ecbf4e02316ba65e4aace1e84dfe3f256354685850c3428874dda794afd +size 507041 diff --git a/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png b/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png index af50dee5b5343aadf397c1351e96f019c9f03b05..9d46902f6862de023ff114aa413c278abb1d4c0b 100644 --- a/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png +++ b/images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:639d37c5c68d4dab1dfb7353a0e520d9a2763d1914be18a4927d5eb8d00fa99d -size 1283646 +oid sha256:bc839011c4354ae66c82ba96b7c50414841e34cab0f248622fbb50b433a3e6ca +size 547530 diff --git a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png index 6fb7b93a20b2d58432287bd4e38f0797c98a5900..7ced3fc094742ee419d792c675bc7e4463bf8d17 100644 --- a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png +++ b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78520c3f01b31f7fe28e01c5f853baedd59b2f09306cdc3b08b2c9dff3d377c8 -size 1453925 +oid sha256:89e92aafcd476d232010ea8d03b05a5e3d65d7566782affa38104ab10bbd0a38 +size 2305712 diff --git a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png index 19ea9b9494ab1e2bed9ebc67bbbe49e3b2eea282..eddc112b9d2ae3b1edf6c78943a9d975a6db6d70 100644 --- a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png +++ b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b08f98660c2a870c381e42be27187ecb0d2e1012196f69977e5496ce1ec4a28 -size 576697 +oid sha256:d2b91a0e830e60a39c0b577b1533296224ff7a0d4c577ca2e7a69b8395d373e7 +size 1200104 diff --git 
a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png index 8a0eee70ae53506a5c6cd7db78cc62f620948647..6a7da12b75b3bd0dc7e195a692eb7c5d70d9d259 100644 --- a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png +++ b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ed92c3018bf74f5bbfcb5e76513ff674bee237115dcbd4186123cbf7963d60b -size 968664 +oid sha256:c8559dd2b1eb3fb9773cba3bb880b1c5d691d4d3d86334b4aa98222576ad5290 +size 766300 diff --git a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png index 2baf0b0e9949b3f6e711ff2fbb172afc38d423be..d09e0e92baa9178a4b6aac6c419a22c7b7a70fc7 100644 --- a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png +++ b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36452164883c9952a620616c807bb69780c25396d6ae5e336678078eaf8e5a13 -size 3006161 +oid sha256:fd3833dc075c8333cd55c8fa6b283ff92e3d76e64f31543d878e1f474ed56224 +size 751565 diff --git a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png index c09ca1abf26c7967d73a75d647ef0c63593207b5..c186f1b6cb913e34807843d26e4e547538ec503c 100644 --- a/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png +++ b/images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5124392cc4d4f450ca05058a6f4b321f4f00fddafdd0ea1a15687148b84ded18 -size 722431 +oid sha256:532349f2b16d22bdf44e3008322ac3193e627b64c03d284a1b89b894751acf68 +size 655259 diff --git a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png index 756a509ff1a078fb013cf585c2f0489d27d6df82..e1f374c75f08438f4784adc9ae77b6e627ee8d75 100644 --- a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png +++ b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2558f6c9ffebe4830176760c4aab3b97801fa41bf7c1260837f5ba5722e90b2b -size 1973684 +oid sha256:b4309c713f953f4ad4aba54a4ad64ae4fafddc8633289836c6893ea8384200e5 +size 1807841 diff --git a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png index 362f5f666689bbc83423989eac329caf74c7d687..c7faa2c066cd6a35a054c8b31a2601ff6e709c2e 100644 --- a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png +++ b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6638efd747f11cdea52db2b1fb6c0fb110a9d71c811482023a0071362fb9b6d -size 2169408 +oid sha256:ec2aceb41c6baefac8c5a1556d3fe3682eb069eee2d2c36527ead6da4c75cda4 +size 2361938 diff --git 
a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png index 4cc930e715ae8b7798b1298f8064e4028da181d6..e03c88eb728671e6326674d7bc86c70649c41e32 100644 --- a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png +++ b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4c680f0e9be401e6b7834d70af9a55cb8f29250259d43a429846ba28d7e8c66 -size 1952949 +oid sha256:387dfb557622662ad3da7dcaf313bf6e08def0652297873b9339352c71f9aee1 +size 1390296 diff --git a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png index 5dfdf45c25546c8813ab6c25849105ce824f2bf0..c8b0515058c43917afff447651969199e3fd0d05 100644 --- a/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png +++ b/images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8fc0aafbb0907cbacfb3469686f687f7b5822cd15835e9d604e67bb5867974a -size 1506042 +oid sha256:515bd4fc097223feb2367b3cd76c69bcf7af791b38d339fd991b15c6787aeeff +size 933402 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png index 0e234773032872744238ac6c5b2db836113d0dc4..b3c1eac9f5fce731b12d0d801377c15809a25d3a 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:188a13dd78b26a38fc3a02a0c0bcb6f52e879936f1702fa9f6a3cb24b12372f8 -size 911865 +oid sha256:814b403eb8bf399bc92bbcc033a6d6e489d77557d2d6ea4bb203fc28cbfe83a4 +size 1715545 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png index 4c214bf6f93c047223b4abe8bcfdc96d9ccc2640..0e3903ca37ce08c058627468ba1dfa54d7cce292 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:426a3b0d0c2c8b107e6281f262d6611b829d4c47766ca13a104b8a0700628c15 -size 919552 +oid sha256:e90f164106188d5b004182433a96cb2a972ea219ee3b39728834db080fa76c71 +size 1642536 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png index bac3c2e6a23d8710028bd1935d26e44441d2b696..66a6c9d3f906a08c44b10bd2bd480f9a6022683d 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e7b4ed34c16e0a9c243c9ceba70a7e7fbe3056f5ff32600bb75806c98641c954 -size 945510 +oid sha256:f0684089ca90a3f28f793399c217810d9cac3148b2f965175a6a2dfe45ed3ba1 +size 972093 diff --git 
a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png index 62c5c8c28fc2ccd978ff2377c0cda0e37fcb1e53..19c999647c4e4882a5728b0c894c50d6cf7486c4 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d7a01e773aa8ee782f7d338fd71f09aace7f8a5d5340690302d508d8e42ba3d8 -size 917447 +oid sha256:1e0047648543d87253c2d8999d89370679d6d51cec2c6f1475eb1d4165820537 +size 916360 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png index 2e28f9099a519bd4a837d589ddd2b2aab7eb4e81..df23d55cc60be9b313c7466508358fe12ecaa1c8 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91906cca6df7278c7f187427ad2b0a470242b4a1e8126440edd03444d2b7819d -size 960807 +oid sha256:bfad74341901d789dbee8361f85047b7b2b6058523f4d54f6902baca93300d28 +size 854398 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png index 5acfae174d8f95ce22b3efb6cba7a9a147e6d830..a267847ba0afa3886e12a3d7abae684116b5b33c 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7834a436d3018b3c65a819f9bd74baf10103fe8d8683600c73dd5bbc33b5ea7c -size 894442 +oid sha256:5b59e2754db6b1ea5e6202b9eac87adc99762ba368749fbb021240399df2c248 +size 1577941 diff --git a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png index c8a7fc8aa0c4724a581f45e8ec22537dff7abbcd..0d449b13f2872154ce022ab0d95a08880cba1129 100644 --- a/images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png +++ b/images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cad16bbf218a20cfff8ac658658c9de4454ebbe280069631f9bd428a587f4fb -size 915429 +oid sha256:e70e44d9ccc957119ff094f3e1b5f95f6878991a11b33f34cf2d7c88995d1695 +size 895115 diff --git a/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png b/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png index e98d11fc42537e2710dc3dbef946a2ac94609f94..5695987b4f0a3d084f68f869f819763c5d10a6d0 100644 --- a/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png +++ b/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d8cdeb5d9dc1e3000994d21b142dfc633814613c8fb5be7cf005d9b135db9c8 -size 1028833 +oid sha256:b6976f8332b64c22544bd2a7c44942ee0b483110534621fc43cd58673b5c7e07 +size 1244843 diff --git 
a/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png b/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png index 34ea2c83c0e7f60b0b4f74136af5ae50e9f26315..567e8e961094618da2869a959b4377f4ef5abd25 100644 --- a/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png +++ b/images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:371313bacb7234963bf92981f0cc67588b896b972d86db219c9bb7af7569406f -size 1156356 +oid sha256:578d6d0b470e3c5aac9e5896f9021ac6a3f70395f6dc08d9efb894cc496ac4fb +size 1870809 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png index 97e34d929ef0d692694d6126444f85bf51c68a14..05f4b5056402b5b099af6845c7cc79c095792c82 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e67180e744dc5af6314c25e0f99140331d6226006a50084138fa0c3a6352fc1d -size 815374 +oid sha256:aaec3ed972835c8f471bcb2b6d28403d304ad77eae6b8e0c48132be71cdf9547 +size 834455 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png index 6a03fdc774fcf1fb4a863b99289d48106951c767..38427d8a7873d309e530e928a561838334e0c7dc 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a6ee0bacf628d4b429084fee2e44a63ce7ff9af3afb33c9f427dce8b9fdfa53 -size 765518 +oid sha256:434c71e9ab99a34646eecce788611862baf43cbc9381f814f2a154ce79c4ea37 +size 910263 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png index 65e35c538b568945f369e73fa218ff473eb73c0e..8bb9d024753715b5ae7501f0cedca31282628527 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a480d8b834ec4fdb73a6ce89d467099f0239e286f4cbedcb4d133087e505dc02 -size 810187 +oid sha256:84506d3b11794cb26648fab2828145be9332fefcb0e942fccef7535fc411831d +size 923921 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png index f3c8e23485bda4da1cfa4f0a72aa37ec20da5a3a..b332c8309e0fc12900841079de3f376135b111e9 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:68ebdeb9637f93f32a985f0dce88d565d6040f88e7eddde3cafa255469589865 -size 808457 +oid sha256:768c3811e80423f360dc38f9eef5c8de950a819ff25e4d90474101e9b3d29243 +size 920511 diff --git 
a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png index f6aee1d5d9153806b3b694726ffcea67698c1e31..b8e7cccf6656257d45257c3444d1390d4c9d59d4 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ab5d8e5268fbf10a5ec4f200b0444dbc59686bc50a46e75341c226a1c5415f5 -size 787547 +oid sha256:191289a7dd77ddc27811014432c8ef2d4d85da07403d0539e1fad6cd448baa53 +size 856365 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png index e41e6a4eabc7989e7a3220330cdf3490104b4223..4bcbae850bcef906cdb2a0ce21c647e18e82facb 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70b7c8ebd492bf18a1a0277cf301b97ede1f478bbd82002075fc02e8b26d3c00 -size 772187 +oid sha256:c32016c05023538e328f2b1e0ceb30f1a7e213ac29320d8d908316726e7f712b +size 918004 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png index abe6693d28f73008da82458feb38053e53612f02..db366cc22093b8846ebd7cf73bb7c52027077d59 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66a1e5deb76a32d682d3b8074fd9397090e0b3b50ed5fa827306b15d86c9e92a -size 808564 +oid sha256:835dabb6e175792191f26e315c76a623692f7f89352dbabf4ab9dea8165ffc95 +size 805560 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png index f6edac5efb042772631271721d5ba95198853a0a..e796bba08a27e2cae05ea425b9df3d8a00294a01 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1a9e6b2b34428f67b0fe23ecde60f0f9739e5dc8705008da7a6115740c7f0a4 -size 382423 +oid sha256:e938411acd56283f5339135fd284b6ce7964b2cdd815459a41502a8f21e8a0d9 +size 286516 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png index 49a5a6967b91f13a8d6e4a5d0b5917e9e922a8f7..083d850f8aa8c086a776154653d2155a81c4cccf 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c36afbcb2cb9c0665f2779cb9b0cd181948ddb36a788f12bdc354e0da9c1a3df -size 802704 +oid sha256:a0c2acbcacdce4be24b3240017d458524b13580d9f0ed73b8b06e8f59354955d +size 879189 diff --git 
a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png index 8c5a808bf8237689356325552529052fea0aa587..c870046cb10db1d8de4dd83367a918d66287c6d1 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:25c4a542ec4da86b303badd41df08ffcb1a65f27bc639dfc582f92b27b164c38 -size 819815 +oid sha256:2ceef27d8b3a742f0a76f8a0c68e938a9fb8750633c25fa1c7b8b4a009579491 +size 931144 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png index da999c95f3a3ae6cd249eded0e73da833975c83c..583111095522bc1975da0342d0f7788f714822f2 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3fa282e3e1f62c24afe56c4c1fe9548278d10312f413aa08d7273f7f681b227d -size 801788 +oid sha256:5ed7da1767c4e42eb9326560dcc2af5151d4e95742c5ae2d77508aa1ba59f115 +size 911921 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png index 07e05bee1b4a207904e1e599e5383372c2b5657a..f78b2f33eda838c6d8d0a0e868d98d7bc63e8361 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36ea8542424bef0812395ee93cdade76e65625c4aacb7c66f258d94fd84cd507 -size 666480 +oid sha256:98f0dbf64ae29ffd7c4b98886360ff4b11e9374543f7a398f1e71061fb3b2804 +size 783917 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png index 06337bc3a40a1702461badf680c31bc3b48a157c..b58a48e3d3d7f3cb4213c90d2a46f3cd5e2eb78f 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10c0bcc8a5dec8ca407eb6a61810f275e4ee2d5f7291b96120af80715c10b7db -size 707912 +oid sha256:814e61b40b81f543bc6be8e542a557a38e86c69993a3202894f83a0a1f403f7b +size 864822 diff --git a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png index faeea1f884a2b7d6c9c75bf43fe67bcedbb168a6..d7bad88a788c7d0d4e50ff0a0b29f17979bcc854 100644 --- a/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png +++ b/images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b3a51f8cc11067ae3172d3e0ca8e330ee1ea63902a7c1c9cc887cd49e2fbff47 -size 704179 +oid sha256:cd8e0d11e5151bc620de2f3c8c9643ff1821dce189a61246a741b3614262996a +size 704338 diff --git 
a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png index cfef3c8d446036d6f30fda26e9204d8a3f3b93a9..fb1e340829ee46caf510b40eb4608cd42332001d 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:90b2f4f336e62da92a0d6672240393fff412cd9b19ab7bf613a8750b241a4294 -size 213647 +oid sha256:38a7912cbbd77c83fc3991f2874babebf43ff29bc854b47c1bba4baf5bbb0bec +size 363407 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png index 79d4e4e0f7571ef987d61e7cc1f93b1dd415fdc9..f37a7a297d8ae81ab52ed8f30ecdddd7a48fc8c9 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f42c88e49c9a80f6854d4ffb7eee03d80c4e7d1b809fb6dd5a8190c0f9052683 -size 1485661 +oid sha256:d4d73490fd808dae1455c7bfa7c5d6ca8ae4a5bc495a916a641354ccd6c24192 +size 1693971 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png index 7439ca0ede5cdb6350d82677f732e8139eb4d952..b9f604fd7ec7b710033cc2920fbba02daf17917b 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66fb388f408141974f7c46b3d5fbb490cd2b534e91554108e5c1685d5d240a9b -size 352041 +oid sha256:efc911d89f0c5442327216c72735b077d29b034fcd408221dd27f8fe97481a12 +size 437506 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png index 7439ca0ede5cdb6350d82677f732e8139eb4d952..d42ba917df8273f95a14837dc3fc4b9976bb5223 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66fb388f408141974f7c46b3d5fbb490cd2b534e91554108e5c1685d5d240a9b -size 352041 +oid sha256:a5b5878d2261e321f4ea37c13a9cd9796adb875dab04fa4a412120b78581e8c6 +size 458509 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png index 609b06b1bfd8547b3b725d5a493918cc787b7065..80e2bbbff2aded8c6ead8db2bf68936c004a78c8 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92d62458e7ebfee9c58d1d7ff7b7ceddd76749ea33bf39a66faf19ba8b5369fd -size 1499887 +oid sha256:9a50333b3388a201681f11027e63896267830f76be6475f8ea21630f35034cdc +size 1529051 diff --git 
a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png index 7f12bcad01abefc3608272d3fd312a5ca8e74df8..f34808907ad08572cc22407dad644ffdb9c13b5a 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5d8ab576cdb31446a0118fa461e94cb0e9b65d96fb43b939aee6830b037421c -size 349862 +oid sha256:bf3a23dbb04c21fb35a0dbe03cc12226adf09d460fd96d34bb565f386589c10f +size 340023 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png index edbf2e821fe05b541783c7e5558693a9a2377c30..ab48e43e7752fbde3480dd96897a6a58c4851385 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7039a5c6eec346f10c4b9b51bdd39b25a24e8158475d1eb91cee05e1bc0139ff -size 433095 +oid sha256:fb5d2161dfcc79954681e6a007d273e7194fbe9e6dd17821db303d48cb3e7034 +size 233443 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png index dbfcd2c42e14c0a0ef0d4f371f7f78615bdd22d0..1c29bea520b7c83cb728534e4e2df7f8db10df48 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:49cceabacede683b8c2554f922b873c0ac124650a2a90c2187768a412a5844b9 -size 1484803 +oid sha256:669362cd1ea14470c074596d99925da71e8d02c17224d7c609501e3c31cc70d3 +size 1530045 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png index 621fd3a74e267cb8409db6d720cec10a3d01c964..65f5578eaddf6f87531bdf309acffddbc6452a19 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4a1467464dbd1c02600346f20853a2e6712359f3cbe3a6fd50abd5f404c5827 -size 1486692 +oid sha256:7457dfdd273b2a67028d125ae5f3b9e570306e7cac88e84415e958a4e0d2215c +size 1531462 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png index 276cbf5054bd0dd146a6881d9ff1a4cceeaaf119..9d75d245ea99ee39ea9433c9c65508458f3b0bea 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:414eb4579bedf46e45578a2709d956b2f9cd1f4184ed8a2c48ffc01787f61b61 -size 314887 +oid sha256:8849acbf82cd46f4bc1e7c202d580e9ed0ce536b2cbb43a076b60fb32e88f811 +size 483032 diff --git 
a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png index b55937ad8f5765655fcdd0cd80a0adb73dcd036f..b219c516218055f9615c80b0d3db6a4fd2a0a98d 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b3ac34dacc9c114b56eec6e61889dc808c478bf1349ed5a39da7e4062f9b1de -size 467290 +oid sha256:bf8807a141d2cea33a345993a6dfe243641ba1e8fe23ba4e3bdfa96ce93a08b0 +size 335567 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png index f7619c0fa4a87cee145ed961a82914ec7c217759..b5b2dd4d722552be8fd622d477ad758fdfc5f837 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7f649ad63eeae958319fc2b2cee73e05aeff7b2c5bc8c00eb59626df3236b0a -size 626417 +oid sha256:de71cd157b80c321ad80d9e69dfa251be1d092fe591db5592d1c0c30758282fd +size 405942 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png index 9b9ef3c6e1e50f763a6d3b756658c444007070bd..6854b6ec402ae1393070389e75f76da5840be1a3 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7dd04a77ab71b046c13d224b7d26a6fb98fd9b769074daefa0a008491db39f38 -size 466680 +oid sha256:a779af9ae13711cc2fd2f1c07c6640a0bb418e2a4e92eface86c1bcab1f3112d +size 306087 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png index f1e70b816c75b999dc64ebdbd8e2a5bda6ad5c73..4c8619f87087bcf587ca068702dafaf18a528f27 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36d89982d2240ed1bad5e7e2898c840f4d404f727631c85f50c99085a8037c4c -size 429841 +oid sha256:81343315e14d6dc0a587d4ca1850c2028ff65dd00091ee7776abd75a4131f5b8 +size 129798 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png index c83f374428c5cd3d9cb9227c7df991936cb347a9..2f11b184c504c3d0bc08174edef8e259e353212a 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2be3bf1409a397daecd7112d05e467ab19dba243aa2996c1d46defe2377b441 -size 378647 +oid sha256:0752772e1168d43c32b6d04cfecbebd549b29d836b680ecac7027eb126c5793f +size 339341 diff --git 
a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png index 4e965319db1622f09b580f7da51d8facfe7db06a..43c13d896ddd30bdd0aa6066424c2da6a7e672db 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2865d8135adbcd4d2da4571b6c2810891e7e7e6f8d340245e84d068b1aa94f41 -size 626541 +oid sha256:5ac2d9962dd83ba2199c66ff4ae2ee597013579616b056fa47fa929b70d433d0 +size 315400 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png index d5d9e90fffdfd191927bf80ec9ffae237a66db52..17f2ddd3abc31a0d069dba3cca4dcd0d6b6bc696 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40a9b6780b9b0db1cca0a2b07b3a3de5a156bd9c70d06e9547aeac373bdd0168 -size 202137 +oid sha256:805d5315c2539906beb43acbc614509a8730946cc95374893a78b9ec4b356fb6 +size 390895 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png index 227ce20a3e48e2ddfe72895f86d94842855c9b8d..83615d5347fd464611d4d692b9a007c0cd7c000d 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26ed7181a2100ed945825bfad0267671595418020bea598ae3d803694646669a -size 600493 +oid sha256:b807b44c3c5544762fecf29ebf490f682bc7a3b642b9e03d3ae5f64e1ebcde5c +size 354436 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png index 2acab6702f6994e9e22081639b582a045396b4ae..3e263603fe41330bb0e8f4104ee889f9ad9e616c 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:368e0f2fce5ab88f593249e9e716ad96504a3981349feacfebcb56903cb5cc2f -size 398033 +oid sha256:7925cdf46a0158e9a2dc4e8941dc25b827ba0ed99b37646f5a124b4f6eb39fa5 +size 441690 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png index 33578ca7dedda423f96e7a604909ecfbaee7040c..bc5462a0e73eabf116c2aac87c3f152afcb46dac 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dd780fff5d31c11b6d5f6dc7730c55ac74b4e83f7b37f06fd12d731cbafecba3 -size 388900 +oid sha256:8f656449716c0f95a5e3f0803cb13c429bef8b08ea973c2f40e2d3fcaf4f013d +size 289468 diff --git 
a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png index 7439ca0ede5cdb6350d82677f732e8139eb4d952..dd55878f4538eb055745abe9db135aea1bf530b8 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66fb388f408141974f7c46b3d5fbb490cd2b534e91554108e5c1685d5d240a9b -size 352041 +oid sha256:960da7b644c821d55493d96e18a07131a54c968dada3d807f5315d196bc5b94b +size 422544 diff --git a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png index 431c8cf0d46e922c0743a7bab03cb45ed58b4df7..f7ff3108fd6030a1933e7b8138c95cdfcd58d30d 100644 --- a/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png +++ b/images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56ab7cd2a656a26573fa3b232c9dc1cc279247c1c83a6aac93433c9140df8507 -size 624477 +oid sha256:c2aa09f837e56fc29c543efab654ce922ff4166f5d4d546688232bc6d6360804 +size 445613 diff --git a/images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png b/images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png index 417346747f4a5cfd7e8d67819e270aa23549de64..4931714ded39738ef6fae7bfa01d45601c3355bb 100644 --- a/images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png +++ b/images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:87ba3be5ed9b467de0ca4085c3096fc9ee98eb8ad707f59591b776effe8e7e26 -size 1346237 +oid sha256:36c791e678bacdd7d77db350928908149df6af292707660fa4f4af7a9a65958c +size 714661 diff --git a/images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png b/images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png index 908c9adeb9adacaee80ef20a37bb6a98539bd22b..c97f8e64ff118172884d09bfaadc9bbff4bd0929 100644 --- a/images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png +++ b/images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92470f6366f0f90e1ba65afa701dd1af2acadf5c0c1aa055ef8b85ceae22eda6 -size 857302 +oid sha256:4d260e2bc30666fcb7a2dfe2f4b9a2eec2249c4052dbd7cc3d150ec005e76f9b +size 867098 diff --git a/images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png b/images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png index 5ab13c816fd7bf0a572577dec512b32ad55cf9d2..90b69fa0f540eacb3fb6db807972b600f5b1778a 100644 --- a/images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png +++ b/images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d72f0faf95cc2a9b1bdcdcbda33046f70cf90628e500fcee7ea880baef8eee34 -size 1444703 +oid sha256:1b2cb47053ca583a3fc566373dbaaf1bedf6f600ad8fb91a82452e5adaaf351c +size 1460894 diff --git 
a/images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png b/images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png index 6ef4ff79d1359ee31c7b20f338cb7c578b9ff953..d3c18262323b066dde15a4ce11ff1e9107d06856 100644 --- a/images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png +++ b/images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0ab9c824e19aab50445f10c41fd1cbffd3d9fc7f02005fe0de64042fccdb959 -size 778271 +oid sha256:5199d204d221938053be47b5076cfba444d27f688b9a7f90b4d68eb7cc31b98e +size 1011192 diff --git a/images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png b/images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png index d2bbe10ece090c62d8321a5639dd0d3cfc2cf910..a6529acb0502312313d597b01764560925425b10 100644 --- a/images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png +++ b/images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9896d9a36671849761d7370afce59740313c61069818da19df8f81d528dcebb2 -size 745860 +oid sha256:4fa0641679efc37efcd7413bf0636a29bf28ad08863eb22b90c0b1e5d6a3b991 +size 767363 diff --git a/images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png b/images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png index dd520f2724091d07e58fa8312c2e600904b16cd0..0e10754e7013cf72cd512cc60553fb0a2741df98 100644 --- a/images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png +++ b/images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67cbea4f49758a6c268b933a115f4eefbd13d940df207ff48e2350416be791cd -size 1158927 +oid sha256:391692a81bc4a807e2a17c56c014e8e1ab1e9d1004b922a44faa48981db3e690 +size 1702632 diff --git a/images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png b/images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png index d2bbe10ece090c62d8321a5639dd0d3cfc2cf910..c97c3dff6be8a1e635341801ded81bec17ac3c55 100644 --- a/images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png +++ b/images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9896d9a36671849761d7370afce59740313c61069818da19df8f81d528dcebb2 -size 745860 +oid sha256:008800de584c5ec94760ebfd0eaaf1972f94c5f344d24748030ba7130115143c +size 728280 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png index 4faaeaefb2de4e05d02d12086efd786aa8a56336..b7c9da441977ddeef40572c503ee855b45f83179 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74247eafbb36769886ef3890cd0902ca373decc04c5037248a4d8fdeba2ac9e5 -size 298133 +oid sha256:a3067b740d09e20a62bf078f6f640b30f024b0d98dc32b3e93ae124fe5fb9c29 +size 311255 diff --git 
a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png index 1e1f1679dc23ac3b3df21a89fe429f7a776d1903..d8f20aec3fbba1301065e6dc276b530e15ad1bca 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69533f2b3081369ae7b1c4ea4a91391e7a48136a869556f56106c5984df9cce6 -size 408174 +oid sha256:b20c3ec8feabca6e8c262314295b9cd3406e45303cf596849f3df9cbc78a2f27 +size 416865 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png index edf4879f45077ed3a3e293fcd42323528c2b7420..c91c4659c9b894e089a18a9e75e1169943a24ced 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89e6c49654bd57b4f3147c637b388ea4e8d42453fee0a7519f6ab4690b9cd029 -size 483536 +oid sha256:c87e9c788419e49e608b7b4024d0fcf82f6d316b64a84a6307c7bcd0a5c8b3da +size 267611 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png index ea7360117ef8a572aaf38f45184755acb5e4dfcf..e0afa1f95809b502d649f2c5f879c822d09851d8 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce191d44bcd96dc4730a50fb71a9c1b8a904fa4c9a2302ba70115763de2558ef -size 264171 +oid sha256:82aba2df591b911d5aadbbe50eccb8fa29b026909843bf80df8a35bbba531cbd +size 21022 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png index 962142409234a3d9ff5d11bfe973b0930ea136ea..17ea1828058d109a7b72d10a1bed7091b2c9cd8e 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62897b9d328c7c1acedabe8c3cd1df7d73e1e652dc568b8ad415b3e421c9c896 -size 421962 +oid sha256:c394da22cfec8395f30e954ad67dfceb5c6ccfdef8788d3a5f245b05956ba079 +size 401536 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png index cde777e5515dc3d0170a28fb1358e3b3e7b59dd5..1a9e036967876d0ff2ee2c24f4e3fbd0d07d0e45 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60f50407e5fcca603fe8d843be3676d68fd5afc6985c757c1b84f8d30a292b72 -size 410276 +oid sha256:a239d15380521152d5b1a7c3698b55e9e6fcff1161af9b2d0eb105779f3dcdeb +size 393507 diff --git 
a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png index 78a80551e80ea635c89e23e6986181afd95bdb2c..c6ce4e19cf4176f8c397fb9fc346846057d89ee8 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:437b6c140848d557bcfd9c1aeedf6515caeec4da7ad1beedd52f3b79b6a5627b -size 395280 +oid sha256:6ae8fe10a97637cfc54fb1d1c088869c4c6b45f549a9ece9569e16bfcad59971 +size 414352 diff --git a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png index e0721c0f7f8b971fd5c5b21e9ddb81ba265166e5..3a2c7199df6232553f90fb0e797fe3e8d29fe97a 100644 --- a/images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png +++ b/images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02eb74af2e3da4270162405ee185794b2ef9b77e21652a310c21664ce2065687 -size 393693 +oid sha256:76aaaa1f027b5608cbd8df5d3793387fb6e45399c4e7f516cd084e4fdd868326 +size 272176 diff --git a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png index 3a3ea9549af85ffdeab295f856ef5efa930e399c..9cce33751e26c5165c5d782d1445729e4e2bd213 100644 --- a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png +++ b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77d76e24f587a90b520cdbfa32f291b907eaabb4e8880b36ea558bed432fd6fb -size 903161 +oid sha256:3f12f0e4631213da6b638d5b24589af22e2ada6230d6f9f165af08674fc99a96 +size 1190161 diff --git a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png index fb8987da50f19c6ae62bdd5197afe7569a33791f..a162fe4ee4153bb99acacd3ce1d4178d5bfbfe3b 100644 --- a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png +++ b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a13f35f68e98c0255d7fe090bf8d77510fb8ad0f66b9658a98fd30c1e3f81f8 -size 918488 +oid sha256:3b560d5cefbade9f2af1d8c010b26b9d1fa783e07f5a81dc53b3eed30a16f8d4 +size 893346 diff --git a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png index bc6b33c76e9ee7a7fffce48ef3e896cd9ef5c8dc..1d6bf92938fbd73f249c1c6dae98a32efa76f6b9 100644 --- a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png +++ b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da4ba6ed2ef4a2a39132c29c746036f92ea77e968352e068c24993b4e2dfe2aa -size 920520 +oid sha256:820ec827b74944d1f80a0db24916472684c0eaeae6ef8658998f1dd11f7ad6e4 +size 1275053 diff --git 
a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png index d87bb9fad03b77257c5ba2722c55537384076d97..29409649e623e6c72b4d9fa62bf6bd215ed3618a 100644 --- a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png +++ b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:559e98b334cc2c43fdf5b14f95995f6d14837a4dec9d152fb350595c5f0f7160 -size 1490796 +oid sha256:8aa341b57cf40b35761bcbdf3f6c64f466f37fdaa0b4f3e020974ff877f0c0f7 +size 735662 diff --git a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png index 9c27ff14b650cb110470cee6a7b1eef277e6ec1a..2133dd503d7916a9dcf9209e4423de64fcd5e54e 100644 --- a/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png +++ b/images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b7222897c61afa6fef45f2d9671226b0d129dbed29a23a31b5eca957e095aff6 -size 1083953 +oid sha256:d983bb313f59e5e58f74c0ea174ea07b27a026e5b5ba81a92f5d365498e035ae +size 604779 diff --git a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png index dfdc97adf7ddec9ed89a4d6cb61d28526276521c..93e91459de4f9d4638bbf2f341fd703b59045926 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:773451609b753e0bb612d6c8c473e70fa8568a0be5d46c40489a3a1270e51d48 -size 1802984 +oid sha256:b591cef46ebca0b82636af3ba67957b36dae9365eb398c95cb74fdeff7e10292 +size 1234240 diff --git a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png index c6283b72988f2e8c84554520acbbc009f4340e38..aad271ea98f60d3b9b2de66f4992545632d6b718 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18ed708e0e077f36335d6aaa29c24c647f3d6cf6b65f2b582db492c1d1c9a45f -size 1802545 +oid sha256:6614723f902821d9499579d4b55de5063d32e16e588c853b19c49aebf7d50ca4 +size 2239788 diff --git a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png index 899b75125161ed0b384e86d4264938726a381d49..224dec7c788e5f8315ca78611a97913ecaeb20cf 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33ba5d951b7a40839d7314c3dd0bb32b4fe6588c8a0461f0c8607ad2956ab978 -size 1805149 +oid sha256:5109be880ece281a16d20bd1f8c5720ba247604c017f73946f2de70673a97a69 +size 2234325 diff --git 
a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png index 9163f3a235d215fbb42ec012ae364807e43bdc60..ada631b74a66ea312c4d9c82615b98b89ee2e749 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ff70a6da50dce518f3399b7404306b44308b8cf1c8f8fdf50c5c9974ad50469 -size 993184 +oid sha256:048cbeb26a0a4719e92df9e7dda4d002d0894f2c114a2bfd932e73a417fa6ed9 +size 1159614 diff --git a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png index d6a9c9b1ef534870d37fcd422996fdc0a464629b..663be917cbe9a186567cb9a0614ae246afcf6350 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:994b3e3d2eeb02df634b609d44cd529465539eec862889a024198dbd7af970d9 -size 1467179 +oid sha256:ec4ddd7a1cc52279109505edfb080779fc0b4c24f42cb4007b80ef12a76b5ec9 +size 1955462 diff --git a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png index 3499d79af4c55785cd169da2f05bc366e26bd3cc..b54217a5ab43c0dfb4301fb5b798d8d92f4ad7c4 100644 --- a/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png +++ b/images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b4e32397dc86ccf140ebdb6086de667ac03089c5a78213ca8d6d0394d1836a4 -size 1802085 +oid sha256:276c952ef329a61d82131e0e1422b1a1f5bacd171b85722b0903542c0d7d5c48 +size 2239278 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png index 2f301795b96b8f75a533830a07fb54e2126b7557..70797b8c7dd85c66415a46f7e5d40b8f91506f8c 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c4d3b0ca05b540f37f68d75d73409ef2d9d1d6fecb793ca10e00d9076445e5e6 -size 937286 +oid sha256:ea24e40eee343bbf22ed359a0a7c79f314b8c18319ce9a7f858d34a79dfb5d24 +size 1021731 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png index 36404e803497e651f6914fff7eb21b9fe0e3b884..6aec68747badcc98f713c92e6f3bca8ef2b0eca7 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:41710fb6db0a8b61b06295c3f8cb05a65da76a66f1080466703920c13a22bf73 -size 1018760 +oid sha256:6515f21ebba39fdcda16f6a90b7119249c4be034d0d2a1f88e00b9cc96439d17 +size 677804 diff --git 
a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png index b533387c81e0de3475207058437536ffbdeeb272..9ff5e9a9865fc8f5d61379a6d6a9b790ebc48511 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f454d3e3ab0db53dd8f47bafcabf67d4c785ba7fa9f02dae284bc4d47b43f9af -size 1054641 +oid sha256:431f0425e7bd2111eb4b5e9b197456cd32b1f6be7294beeafd9e757a456af397 +size 878197 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png index 3f2467c847ca708950ecf7450d0152a4453fcc71..c3aceb7a6d2138ad3a4580659abffa0783ab312e 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73ed4d2f2361f858d3a6d5179a3e9e2fe41788babfdbf52426344105cd2ccf14 -size 1053688 +oid sha256:153bbe8560422c8151aff82c96b69694d9adde6da701d0f4bbbec06159dba9ba +size 1116869 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png index 76ee2363b7fc5117f8ae4f1440585c691fab88ca..f1a51857fddff543d9be250924f0a7dae2f61160 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:02c33ec5e9ca114e2882494fe0dc2b1af8114366ad313f5747f5c5e209c0fb55 -size 985611 +oid sha256:72437a76f1e6773c75b68e8717788b3f93c47d9d56926147d674f2c8c98f1a4d +size 834894 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png index 368ba36a871fb53614060937beecbae1dbec2192..61fceb150d2855c64b41ea2ffaeb5031b1eb7047 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb684ed33b41c222ef87179f81bb61e2e567b8b190ff0a56a59b31de7ee92b4e -size 618297 +oid sha256:5163cc78000fd0aa0f3b59a55bc26449c72ad26d3e43159d30054342edb697bb +size 618824 diff --git a/images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png index 8d92c3b2be6f47044747608c4cb5e3960817b1cb..219c4de1f303d5de4b3cbb6d6c8c7e48247f1925 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef40458a7ee9b9e97896ef8d980101d17c5321f25d31f269862fcd7a3eb7deee -size 653217 +oid sha256:0b2f7d5e47a269994e2b8b866325a41f7517988e87f73b08a4288c0964a26adb +size 571599 diff --git 
a/images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png b/images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png index cb508318192823e892ad4731a40424b2c6dfa71a..56d3cc8c323cadd367373a182b3acbfa9256efc1 100644 --- a/images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png +++ b/images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16b18eee1476871490e66ec2d4979578c0899d63e382dbd5f946f836aa02c13b -size 1051703 +oid sha256:77ae36ad940f89a6301e0f2757dd4ac43b2030429fb3d5c89391800cde80ef8f +size 1114838 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png index ad8423600a2299ab066a76706ea10882dca8fad8..11b35c1a32ab698d6465c4077b73903f9e39dfac 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0b0f31f8c677a907f0064a99b448ff0dbb7791ec6128b0d61ea3b733dd030dd -size 813089 +oid sha256:80f50c1014ad861ee13bea68d41dc08ed5e4939f905ff2217ac864f90b78cebd +size 940555 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png index 06b66d361dbe67f53b244974a0222ae43269bf18..22bafe311cea46e68240fe0b73cfd23d076e62de 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6286c7073cc11894bf1e980d6bf15f9ba548334f3badbb24e984b36a8767ca9b -size 1317828 +oid sha256:44b360b186855c51bc5251cdeaa8ac31acd9bdbade9003f36b8efe92de48b9f0 +size 1533326 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png index e5a6be3fffb4980eccf85924c042c057b70ded14..b8a1d3ae9c717e40b67c8f312c7ec1d1dddc1120 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6a70cbc06cb05c7fe6dd3eba9bfc2670bd43982147b51b3fa6df3b0c1c307dd -size 915959 +oid sha256:0b95c41d502fac0bc8c96817ba7d5dd38c425b40057279c74ac840cdb496832a +size 902610 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png index 0e6c8b7f816e34e8ddf6537cd1bda548df2faa2d..699e6664e18540c40779247443b6f22c58afcc1e 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb3dd792a9a977b048f8312505979a2030339a970d19520afb0c15dd27ce3de4 -size 1258317 +oid sha256:189ee40871bbf5686f92acea15a52311e26934d14ceb1c07caf94542b3e97945 +size 820847 diff --git 
a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png index 31b0c0167d604ad90b6f9928855c6414e71f539b..3fe70dd73e4269abbd4d89d0be01790f5cf1ef22 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dc74afc4e7de947f319c4440eab70a4cf49239c2b0a47c7e269398a288abaa0d -size 757197 +oid sha256:7b563d43e5be4f9d0dc754861daceb502a0b4cfbc02b8db4f23e044fb8b6d69d +size 733430 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png index 32760e3b900d879e6f87983ef3d5a79cbc869abe..18e6ebd39e4f8dce651b3209158965214ebafbdf 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1a5a52a0b6a72db46efb98a204f24beb6b425e90a2673e54b43b040a9a123fc -size 958117 +oid sha256:64497bbb7d2afe5766411ef961096a5419fad1402853c5d1451531ad224dbd23 +size 616939 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png index 969b93d410339ed4e881e8fa2e02821c3a5341be..538c5d2f0a6f32421d49b4d20c3e2033481f4bfe 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a89b275ef5be22b99ac76b8f511c70921542b1a20c8d2effbe26634d1e201c1 -size 993618 +oid sha256:27856673f412a5bd95154a5e85e22cf84c3c48c7086e55108af4769c89845869 +size 991065 diff --git a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png index 83987eaf47faef35c9a794448fe96333bc23c961..9a7db99367abc347f66f8d48a084e31113a15c5e 100644 --- a/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png +++ b/images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5119e53bf6c90a07cbb7cc8ead50de1e8c02b4580a9af47fa87aef4c142d2460 -size 802876 +oid sha256:bfb49e20312a650d45108f1af5f72c60eaab9725d44764806335ef3305874915 +size 478745 diff --git a/images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png b/images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png index 6291c6f0df0c08e398d9131616996158772651ef..bd1aeccdb1644986b365669ed46b77d86b1f22dc 100644 --- a/images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png +++ b/images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dadd22f26eb0d63f634f5f97699087662529da0bb64b47dacb2b4d07d86eb902 -size 1143840 +oid sha256:315b1b0b1ee7da7175a6a2dcf1bc0259e8abc664f7b21fc01557b71f1feecdf6 +size 865636 diff --git 
a/images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png b/images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png index ba55938c5732124928379135c3c33fce59c9ac0d..97744c739ebaffe21203dd392a54292beeb9b479 100644 --- a/images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png +++ b/images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bce27ee2d942f618bc7c64e22d956e62919bfb57830c6c1a9d4bb8bb55327932 -size 1538866 +oid sha256:195f08f029e43b51ee007cba0f815a405718959f9a419f172813555f08d1b2fe +size 1510762 diff --git a/images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png b/images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png index ed78dfe1577f92b4ca57900b3b5599e8bc2426fd..29e027fbc015ee174bd527bf784b18d047b52947 100644 --- a/images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png +++ b/images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:056268602a360cf2b5920753f8b595b4aa2d4ee1ee30e849b0dfc1de27d12dd1 -size 822521 +oid sha256:30829041531fc060ce9fa08f43cb7c3580c628d9221d5cd1d15612741897eedf +size 789534 diff --git a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png index bdaf9044d3322a6c6ab771c9dfedba6231b0369c..24eccab4f8eb56324711ba1217a634e385788fcf 100644 --- a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png +++ b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d92a0c8a296ff76f839cefd94aa1e55807ee95eefb7b09762e29e304258af6b2 -size 574728 +oid sha256:213b31e4a7d08f11b81e3a59a463bca05a45a89166dfe6247e88e9ae31277bb6 +size 545778 diff --git a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png index 2044d25882ee9a9f8c134d912dc5237d4e3c340f..fd7f874ba1b101446d4ec8adb80430736ed59533 100644 --- a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png +++ b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:869d521e8e83bae428691c4ca87345f460ddf4582925212314ac465d485450ed -size 773783 +oid sha256:9568db481679171da3ed7cc262adb037c64c3d9bf0f1bff78f8540cfc089b596 +size 603014 diff --git a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png index 67dd58831362f4c77a5fc9c80e7acd2d44c952d7..196f1ff4cd5da3c24cb7f557eb8fae9193d4470d 100644 --- a/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png +++ b/images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:779723702795775ad894b343a720d1bda2b945e2f8e516775d23afda85f3b601 -size 593804 +oid sha256:52f62ae52427b440d904c3884ab4e622fddd0ab926fed1f1b46da09fffd83990 +size 525202 diff --git 
a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png index 624bae8f6906cdcfc9c5f532756366446dc48965..ae132e01d1467b3b75dd74f3aa2ff74b6f12437b 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73fbf276aa90c5616d9a050528b50c2d0c4064efe9f5b7941b97ea74a6f21606 -size 2118805 +oid sha256:dcd9726b5c744616cec3505d0119e9d3b65e74bb08654edacdbf515d00a7b56f +size 843881 diff --git a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png index 9f11b03b02f828d6ed0ec8bcc32dc7f7ee9d988c..6bdea033cc53560f76eb17ef80c6a1c22a029300 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc8dda501d49eec7f48927f9f9e174a8d9253bfce914a335075b2d8f1dc7e1f2 -size 2119777 +oid sha256:7d3a7e098dc30e35578ceb4a69e165bf7c34138db650960891e8885aff706ca0 +size 1421914 diff --git a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png index 398ab801413daa4568fa673ace0201e8488ceea6..06482bf95f56bd11e7a7d564181ceefef32a64e6 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85bc2092d5be8a37da552dd23787684621b718c3f0de4347128f76b89304c4ab -size 1976806 +oid sha256:b11978e5b15f0724d73b9f8b71ca46451aafb23ba2544117ec96d0452fef2eeb +size 891404 diff --git a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png index 23974ad4453371a9760612b767268e95bc410ce0..45f377acec325ca15d0898c8f0d11a119e0bb18a 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85eb32d7da6a3945b9c9a97e5e891bd28d523c933ec6e10d74e2d25fa56d4997 -size 298345 +oid sha256:60306f05dcc0324356a81756750c4f3b3e6b9421a5a2720df213ff131e947f89 +size 244183 diff --git a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png index 01d3d850062a5498cbdc5ef1c2f49794ff76f2a1..bec30f6883cc262be0622602a2c8f1f43e587492 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec6548cf4503821cf7fc5ab4c39c8c5ef5de21d0fee43f3e58a250f86600b8d2 -size 1549229 +oid sha256:be19b476b24ae2f8b3f84a9e33c012f14e5f3f0a2dbb8097e4b898e63e35b4e1 +size 977378 diff --git 
a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png index a283234bfd66516d405b429132200e42ee3bd3dd..ade632459cdfa963ee23f4372b3e57022fe2ec1f 100644 --- a/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png +++ b/images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca5e5ca3a2b8d27a09eef85c0f583f78a7bfa151cb070e64744767703369c24a -size 1963280 +oid sha256:893c49d27e78fd50189e799bba3326ac68cd8e13e0fb1203d8e287598929b0ac +size 2110812 diff --git a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png index b04dc2d47be9be4afa4c2d43417dbe2beff483d7..083fa8db6978b270c140741a340bab321a55ea6c 100644 --- a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png +++ b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1cacbbeefdec1d4c5ac93bfbb106f037da069be9a1f24128a175bf0c6e4b24d3 -size 1162735 +oid sha256:f7f982a3a726cecb4846ca2e3774f8548ccb217237cfd3b24eb47f5c383edb36 +size 1364065 diff --git a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png index ff5448fa433605aefda7c6f8020aa2b5584300b8..d13cbe0f6d09768e38984755cafe960e42831840 100644 --- a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png +++ b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:089a8f185b0b53fe016f04ca255f0f772d0532a8c0fe05ddb8baf2ac312f212f -size 929473 +oid sha256:f214e2c7914aed00c922dc2a34dcba73fc3133a54a025805d865614d1e9efffe +size 723525 diff --git a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png index b4a39879b166fda2fbba34e593d89e5f7d0e00af..32c5b84b065ffca33cf1defe93f1d96a3052d097 100644 --- a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png +++ b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b4eb82641347798778cf9757948d73436eaef7a58750236a2ef26f981fea891d -size 1194734 +oid sha256:37fc144e4c9514c2eb1a6d2cdc117966e770eec058058e93512a9431c1e37b14 +size 1287950 diff --git a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png index 61cfe9aa8107c6ba2bacea25b9f48be6e37cfbee..b2be13f71ce4f9906088e4a520fd77d8d7f097b4 100644 --- a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png +++ b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9fb9a8696f92ca88a4fffced7069df401c8d2ca0e387f83d02daa5a2ac4a88f -size 986116 +oid sha256:a3b846468fb596c2b861726a728d14169a16770b0e533d5a2a61cf840d8f59b2 +size 922808 diff --git 
a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png index 3353df4ee8134a9a20640e47a25b926a4a2f1a71..ba42ee0de7371831022ca8e77c3f5170ffad5002 100644 --- a/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png +++ b/images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d12797cc437c166e211f19076bf28bcc147a1a16de9cb2c8a678ac4e49c5b037 -size 1226293 +oid sha256:68b16416a4b91d27ca233fa2e8a7a985d2911060a03c662b4e658997aa291969 +size 1284310 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png index 88268c0485a89c5b70d877e1aee7db6ca5671b2d..551d343883ca6c0f0b8b6e3e2e3737ecef35604d 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a0220993bcb66cb947ed95ae28460318371babef29a69cc07e0a1b38488a7c5 -size 535982 +oid sha256:60724966f2bee77d7e139686fb577608563b4e9e0844154d94364f7bbfe91426 +size 857976 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png index 82fbd689ac3e85dfd82a5a6ef7c4d10e7ca1f4fa..2eb4870e920bca8e984ae2bf56a34cfe51c42075 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57a91cb1fbcac6b90e0ecbef5ed7228900c5736271f536ba8c8996b4783201c0 -size 537422 +oid sha256:0a40ab22454dc593232bcb871ced42c3e964b32d4962dea9fc1641db632fb38c +size 818977 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png index d3519e52ce7f791ef2223914ca6e3c36c7c1b8e2..0e8f78b5afe05d212ee5ae0fc6fd8f3c56c4c826 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10c07287571a7f40cfbf3b3b2b67331e47f6adf59d8066fc973bef582204ff56 -size 871009 +oid sha256:490a1c7c1ee5e79dcf4537db380724de4f7c87eb3f99119f1c74b306d63ab09f +size 628731 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png index 729d96153bb9d51b95fd5ca88103ecc865e15284..43e729553a49fb2eab9f54c4bbaccb4804777ca9 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:599dfb9312a701a5daff216e63b58fd6ed3087a2ac724a9f68d22c6b69488a95 -size 564258 +oid sha256:13565a493308f483293d1f220608bb6f3a53b5c60a8dae8efe39c4cfb6da2abf +size 910093 diff --git 
a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png index 5393ece2bce43ae835ec6c7621ffe12c0b4e6c08..b5e86641fef541e1eeec8e375574bd3b99a2d38f 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0eb271f525c3da974859f4277219ebd79537c930a4e0b87cd65add691998f6f4 -size 598994 +oid sha256:a6d11bf71691e60eea4a9caa130e6ed5f1dfbb87ec2abf4f17b24a6d0c7b920c +size 893599 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png index bd620fb8080555b68f0a80aff3fc0daa023c5345..79d9d5c5126b495ceffb8ed9e6dec28525bcd6cb 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6744bd800380a1c56a05bc2aa4d2a832bcf31cd9d2546d905a62c1a871e2e0c0 -size 533861 +oid sha256:1774c6454eca33ffa261535252d3515a7a2463f9f27d2b57a260d32b14043378 +size 470808 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png index 470485d4514fc0097dbd31ff501a47642518512a..860ebf6c1041e79a1a460f23ed11f9655e273e6b 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b54eeb261e48e1c50eb1b95017dd7fb02272df92d8b38daec9613a771eb02f5d -size 506830 +oid sha256:200aea8c37229cb29de69debcd34a033924b8d6a142252c384ca2bc36cfb33b8 +size 698295 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png index 48e47ce9d629a5d0ab2689b6ff5297573902faf7..a6f589ed0360487d27c5319feb3302d3295a23b2 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72e422e299a088c5ae5f737d6f97dfb0201a2b40b95054d7583bbdf228b55d5e -size 930155 +oid sha256:2350a5de85d2e2d141deb5f47fb673b2bb34e3e1a71ab3dff5d2635adc73583a +size 985529 diff --git a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png index 16c059d456a1b3ba7d1635cb2cf6918209bfab4f..b51be9eb8884ff1f71b88dd68da60da79f8186b7 100644 --- a/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png +++ b/images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5adefea9a79b397c78c189602336a2f742fa9ef8adfa936073ca75f2d23c817b -size 644162 +oid sha256:a264cfcff57be8678ebd4bc60ac129e8835532d5fbe44bcf868a4b7847c3a1ec +size 436618 diff --git 
a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png index c4c4f1296a40c50619a08eb8d64ebc3a038b9197..b44caa947deb0025e608a874b5e9820b09695594 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:05901f7d3ecffec0d473e6063bb8fbc208d6b230b83d99989bdc135dfe855be9 -size 723434 +oid sha256:260c991a500d78a79f2f82cd6f9bf9113be182a0eecef4851c60dd9be5d5d6cd +size 941297 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png index 3ad306fb56161b080e9acb96cc89934f79b1c610..b8dc855ab111185cd9c72a7334be09ae62525b18 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:34a279c9f5a121cfe27ac2f444b57f67b8552159caf3786bcb48899f4d3cabb3 -size 726884 +oid sha256:5c1cc5414c381f0e9fa9dc4e1fb512869fd55f824e411b09a5e4b914fb450b4e +size 674671 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png index 30f47c514a1bc75b22edf9358c625d3aa0053234..f7256612e85c272735d664836105f4fd9b23fedd 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:233b5abbb19754cf39237d49a51d2bcb164c0900a90300effa464634a81d26fa -size 1059732 +oid sha256:a8f9a808ad942a81ec088e51f178d779c5a9d93addea841eeb92a35d56c3a446 +size 1279323 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png index 9e84e61b8a758d6dd51d23dc5be44ad9b8f36514..d2bccdf3f00fda112d749f431216a5b6bb204096 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9564a841f3ef13876433c99b0b2518ae50374f1f230fff42ebe2e1b1c839d6e3 -size 1075015 +oid sha256:76d466a384726c84ffbc229f59b28c56eaad33289078b660128165b24e0741eb +size 955880 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png index 71589228fead5a8c1c1abb581882be37b2ddc0fe..3fd8bb363d3aeed81dd4b93989a37eb16b8ce87a 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb5e1eeb71d80ebbc0ba54de446943898fa8efffc6eeae149bfc20b12d9bf487 -size 979083 +oid sha256:904c29e9cafd5b693daa6accc180057ab1f7f6780d52429da6139e73841d2ee0 +size 898061 diff --git 
a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png index d936a826d7845574acd98efcc8dbff4717f8414c..56c0d39cd61703b265b13e670ca97deb13cc9925 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99e0bc98d061cf53d15412bcdbbf81f129feab3b3aa23e7cd689c10ba495cc58 -size 1330321 +oid sha256:d7fa2efa6bea117368d3ff776605dba6f44cd25d2d6ed01decbef626ad52a857 +size 1295637 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png index 388c896150afe9f14aa1679d2174f6666919bcc1..36e4c8e779d48ccb77581880e142b4460c87450c 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3603a53b00f5b56dcbc5cfbca4bd0c17f5d6d93308c24cfcca5be42df2376117 -size 1122003 +oid sha256:ac2fa55b4a86c69b7484e82ad7f072eb8bbf1bc2cd9741f9959272282c0639db +size 1138601 diff --git a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png index 0da647bc3ef1ab1e725a7d172a682da4c78b1634..d0b3a843a8f7f317e762632564e6ad7fd60e8fb7 100644 --- a/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png +++ b/images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32ff74f68fbe0789e6efd84e2f7946946334dcd813467caaa19baaba3c258b50 -size 850516 +oid sha256:80d95babadced93999799f429917c320dfb808df1c4394012a0161ddd08700eb +size 1449526 diff --git a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png index 7be1b7afe4672a8e187393aab01de3fe0defd0b8..874e4b2a4956e132d6309b0ea3771811f1dc3205 100644 --- a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png +++ b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48bfb652ba1bbae5d96d9b3a29d6245025ed3959f03724ab920e42703a08a33d -size 972574 +oid sha256:13c50b0f089215e623b0fd86fb9ed4d09f00227cb27de2f33c183b0d3dcdecd7 +size 1310520 diff --git a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png index 7de79086f1382cc5d9873acfc567e4aa545f5170..1c3d4e06219b8ede9147cfa64b712204ad9cdf88 100644 --- a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png +++ b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82dcb9c36804584e6049472485bc07822952d3d17daddc8843fe3ee5d13accbb -size 1210952 +oid sha256:42dc405f73857fdcbe2b8ba7f9b34ad5d543c48ea90fc1289d7c2243be2a57aa +size 2290498 diff --git 
a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png index bd4e7a227af8e74255ea71240e1bbe3f09b42ca0..9fe0e5212ee7e8da646b52a3c1b43d1dbd7b6888 100644 --- a/images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png +++ b/images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3c07c8ec014db68560b887cc52972f3153fb5524cab52b26618d36d041212550 -size 862321 +oid sha256:f36cb32582bff28311570f515d74dddf3c2e779505e9f7f6bc8bbd79c1274037 +size 1426078 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png index 6b4c4cf342cd0663f606f317f052a267fb29f229..932ea97474639c961ddafed3d8e9f6454a98fc88 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7755dda0ae68a0a71392f6efe2aff7cfff91d39fa3327ca8992a5676aac5de9d -size 1057088 +oid sha256:095db8b63559f5653ee88e1ff31e2870a232155252e11a3018cfd47afb4c978e +size 1104889 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png index 2b308e71e008f725282a0eb7a2b02fea9514488e..c6d13bef5e91ffe2d9f343935ee532f9a7c1d629 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ee47860ccfee093bc88aac2120a568aafc3fc967989a465bb2a1a5e4c2f2f22 -size 1050108 +oid sha256:04079c4bccd0b8997d194605954f17dae6fc865a895a7a0a57cbac89aed7cb15 +size 1216260 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png index fce13a19d0ee28aa7065161a04c851c5765c3fb5..3e0ad814a6106ac879dd6327720adb99f791d771 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f27bbaa9f11f31182b099a9426c18ce66a8d7386b9de4cb8b9538e1fa983fbf -size 245614 +oid sha256:d2b0ae8041611df32063030d2df47eae96c88d4e9a3dc2c0f945ccb9fe9f5091 +size 254441 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png index 368d19bbfa7a2307a33fbddddc5898518e6ec9c2..cea3af0e533c3b4b65912b0000a12e712bbcc2bb 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a87b53e1f4de7d09169b1150d77cc0c91adb4a6b89fe11aede79ae06632539ba -size 778667 +oid sha256:e43b319693234b66d00966aaa59497e3f68a65c553a571fd56ba29eed8f71cb4 +size 964891 diff --git 
a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png index 1624663800ebcce2bc4d8136526b75c871377145..27b45012144147a7543c4f71e2c4f2804643007b 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c06cb5b5e774f79b5705633d9ad25f6182ed4ae2f3fa1d9b86a2abbda5d4e908 -size 927163 +oid sha256:ec1d6f7d30dbd139967b102b870e54f4b6f25c7763d4230e18643fb97abd3de8 +size 638863 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png index 4c8c3f430d01705e49fb58937f3306049472f815..858dfa24ad3d1f74b9ebce53a5d7985d212c5d15 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:91c75bb1e720bd31b8abc61a1073025c1f2a3c46ab3c9bcf0b3f8a7931553482 -size 262237 +oid sha256:c7a1689ccf4bdc7832e5a13e9a3525b9ad2f9c42ac036f51c2758a8e9b129792 +size 269925 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png index 6ebe99df438796d237ef8940b2d735dc0bb481bd..1415937142a26c433264aa45bcaa8ff5d09e2d7d 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:220007a7fb19be1f70e72df65866093d40e2af8576b2f3e955589cdd398fc6af -size 1131094 +oid sha256:070439dfc7f37d5d01bbaf1baceaa7c3c4880e3bfc0b94f71ca740cb25bdd8c4 +size 1545165 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png index 4815618b4dfa077ef737628789154ed1a357d46c..26e0d3b38013a0209e10b7666b643027158e2368 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:476c1f2ce87e13b33ea8b7360523920e9c773ff8d766c05fe9e1524606251560 -size 914901 +oid sha256:c18a1469fb466791b8b14c16be4f1df4797f6bb3ef63090e3fbef612bd9191f6 +size 1247020 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png index 80ceed9a34cf9afa59e1e8856d272cba2e806996..a55a31bd79862a85d7ce4cb81bf6f0c2a38456c7 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1f2b21ea9eebbb10ecd65175df397deb10c4c09bee510bebc36a235dcd92317 -size 1118636 +oid sha256:dd4adab0e2bd1662d8f3b8bfbdacdd1de7128551368b63993566b0f16a98f24e +size 1158019 diff --git 
a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png index 7df9d7ed1284b35c0256f79055119cb33dc7436e..2b1a682792926f6e7c074e4d612a60aaf6b9a6bd 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:62ba4539253c0da8b051a8b5484def3d410bf4548ff9c299cd2f2744fcf81c85 -size 885493 +oid sha256:9c1a1c854425d189d696a0100597758adca7aa52131ad2c98ae5bbbf3503cb3c +size 817395 diff --git a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png index 49ae8f25d919370d9eb5f603ded9d60ed9cb3673..a29b37cc0b2e05913848e14a2f94509c48ec8bd4 100644 --- a/images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png +++ b/images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2b925be39d3e97a0493e1e070766bedf7aaba770e414619fefd5c0ede7a59b52 -size 1036243 +oid sha256:0e972c3e3702a6cbfb7e95a194edc39315f42cc596ad294b39aed8e9414970c4 +size 1205945 diff --git a/images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png b/images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png index f24e194f787b75f19419ce04358d7b270381e0f5..87990331bc41ccdf94dd265f3999266aec0cdce2 100644 --- a/images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png +++ b/images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:261cccf2d1e22c11eeeaa329c55a8459b80415bca25ebd36c0930daee035b88c -size 604963 +oid sha256:400fdbfae69a6a33010ddd02f4bb54de0c8d79507d23bed9bfc373462b6d84c7 +size 465464 diff --git a/images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png b/images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png index 26e64caf7526bc5ef080d39a792379f5c631d7c0..c14066f086d70a58bb6a78a217bc7cf3b69420a2 100644 --- a/images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png +++ b/images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c903fa3b5af9eee5a2204614dde48f1ba6289bcb945ecac855d514590e829e14 -size 777820 +oid sha256:438c68cb6f0ddfec0f78ae045c147aec65aa16c24ae8c2e3834c275d3a35b441 +size 947690 diff --git a/images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png b/images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png index 2a274b5cc9b6a08bfb2a199351863d9c160e5442..5520942c9311035aedd756de2ac92317cf47643e 100644 --- a/images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png +++ b/images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f154bbe2af2e126458387140aaec2a09913549f4676dd9b96d20b44fb8190341 -size 737617 +oid sha256:dec792dabf234a36553b8bc2744e64c650dc9cca851790818a03604cf6831872 +size 726816 diff --git 
a/images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png b/images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png index bffbccf5356a706c2620a99dfbc319e49b0edd02..2813c676258c6662458918d9450b995d6fb6e78c 100644 --- a/images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png +++ b/images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5753abbbe8dd01eedaff995dd80da33228f260491de83a5cfa681b674c9beb05 -size 3055060 +oid sha256:3f775a05eb3bf3ecab03e373a7f617e76c9b8a81caed429be3c821a48297ecef +size 1469454 diff --git a/images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png b/images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png index d259e9975b089397cbf6b9efa62e31d3eb075beb..f5aea0bd7c6f02972d16a80662aedbd7efab9b26 100644 --- a/images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png +++ b/images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22f119a4523e1a974e1a73127651f04918028613e181f595edf507a22709ff78 -size 612311 +oid sha256:f424d76c7de260fd17b3783995ca9b949ae1d542fa92e0c88dab2c1d4d3ed15e +size 683452 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png index fb8c9cd4ea27202edb9c7f331222ed6e0e418fa2..0ccdd1a8b6c61dc504099f293ce331587ee67fb6 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bac1884b2a70b71ea726814aa7dc3570d0578272a40cd68f93e73b28db0c85f -size 992406 +oid sha256:598e9cc3bebe860c72d6b4ed34f889890c7dfb156151c02ae0a6804e147e5414 +size 921282 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png index 03fba180ae6c1bf23f9de04a5a25732c6f36fb36..e1694280209ba685975c74b268f1d2cc0f539efa 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2ef300af22e95f499e509e38fbaf2b5eed3296d00c00631a4becf60908b26a01 -size 997459 +oid sha256:fa3ad486aa1517d7b7f5e4a5996b0a4c18d120dfd4fe58b1938aa8e3f99c1344 +size 1093282 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png index 8a8d4eed0d0befa90135d482490628fd10795f2d..6deaa67abc21eecd6df4a9764345161efc0db253 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0a714f43a27a6c97141e67a48bdacdaf40f8d11829892b80cb644ed485d1cca -size 3791106 +oid sha256:3c0735bf04b8fc6f722227218ea2ac6d213f9c661229e8baef012ae1ea3c4fd0 +size 1840489 diff --git 
a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png index 486e857e6a39219ebe140a8429551fd73d7bc1f7..63071c6d44630c7b3b5d60aaea804f6be6249264 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:979f56fee38cabf56fe52d03be7ce8e2abdc19354c539f91e99f61856b9cac86 -size 589346 +oid sha256:0e306f1f2779ec9e272f0e05348ac77b9f95b2bac0860e1bf63d7c26298e497d +size 645122 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png index 99a01eccd6363610bffa665bef4ede8982e1c5f2..bfc6314a8e1fa0ed3acbb607d7846a7810a1ef26 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e12fc62807c4a78565591b5269bc0dc9d80c0d522a84e8209cf1bc21d137801 -size 535258 +oid sha256:9294a63f627a158797773bc18faf1192bcd1ac80579c44c12ebf1688312cbd16 +size 782430 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png index 4350ae4a8fbf750f18c21fc381a74982572ed74e..f38ca86cb5b8a7eaf117014c9037f66a05265ea7 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9fb5614010f11512f0605993bce774a159c9af529ad3a8a1859dd148d5a21d39 -size 647169 +oid sha256:ae5b2915a942c5d06438915f42e7ffd6655cb1d645b9f45f2b0660b62170cbbb +size 645537 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png index 2bcf84cb0baa5ccb416bc3a574b63a8599be71db..acbebe19076267636a7dcdf5202bab1b8fe4702b 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c96fa593da29b1d2c2320448fd89421632a82a9fd9f2a769516701fb830db268 -size 470136 +oid sha256:04337379c3bd6cde4ef6692ae65d3055f2c98a96f050ed29887e39d6ca35e212 +size 363369 diff --git a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png index 2f6599fa0b83dfe0999981d870d89fe1727b3652..235ce3305a2d5eaa8438c37cd06ebfc5822ca1b6 100644 --- a/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png +++ b/images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b57f98e660fa48e23e9bdabc3b42b65a9adad41d17748c719fd3e6975676c25c -size 562042 +oid sha256:8392925f3a72e78f24458c16688f9f6c500020ec280dde0599bb6c990be86ffa +size 447526 diff --git 
a/images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png index 0392d32f1c8ccf38610f3b9608f66904084dabf4..fc19dfcebc6322a7f0699be6e8efe582eaa32659 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:10c45f6bbe0b12e72750e37018051e33e1415867fcb3ec808170a21098bf44da -size 568650 +oid sha256:8123481394d37b58d478027db587572e196979ed19935c815e962bc3a8545060 +size 909904 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png index bccdd438db4e259902563d57f77343f6dacc5393..5a156b364a5eea4aaa9bb530e3bcdd1d180dd04f 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1b410d2a3f7275f493b209ef3e67a32b69efa568e3ac76ee2879d82f0e0907c -size 1696533 +oid sha256:e0c4fb4fe2462710296c6e6195d24ef843752fbc41fdb80e16e40173e8c67328 +size 2326757 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png index ecf5c490e347608bbf7e95d0a7a1e0ae84e14997..08942bb565f0a15dd648a6d853e46b99e6748401 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:642f7c0d47e5f9299f78206304f77079c8d5aa62cfb1397637822e1e782ddcd5 -size 580946 +oid sha256:54af37f671ba99de1e260eb7efd3097f6652d88ccedaed6cea1969b30bc4d0a4 +size 575395 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png index 72c102d31953edd33103d5667a2a520b774b40a7..25fee6ee60973b0f973690d95b193345ec5685cc 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e9566d191cba05483a17b60a230128085523da8db70af2361be3bce897f3a9e -size 500473 +oid sha256:63066727f15504c497fac10576d542f839d2f686170b66343ed29402223fc200 +size 825271 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png index cc71d2bd6710ab488dddc1ccbb92b00373d26b2d..b22ff10e9efd5439e371deebc1ffd2411f5217dd 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c125dd9c5be3106ce3b1c4e22312ad474ce173f9470b89036ddbca0bc0f2295 -size 582213 +oid sha256:8ee5bfc25eea5418a580cd18bc68cbfcf9595ab02da28e712b408210dd5f0a53 +size 981568 diff --git 
a/images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png index adbf29c8152c67944907f01ea423165b4ce804fa..14842040173f6b759e5b815c82072f7799f8aa2b 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1cd38f0b8b32c5481c37e82056cee3dc5b7221d8347d7389c6a850f6d6ed344e -size 736071 +oid sha256:3a235b26051dde4bdb4259f562bfcf210d3e8b3fc0f9659167f04dc84805be5b +size 718617 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png index 5c28387046ac88ac886800c59cb84820ddf38b6b..b147e2ec130c9fcdaaf9e18037ceea19e6475d19 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3eedc29e3df41ac569542426f625380ae70da486e6c1fb47b7614c25b5a0e19 -size 1397841 +oid sha256:3917869ee7da4e3e778fa4ff03e586fecea6665dda4f888899f8874ceffe740e +size 1429881 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png index c65a6823bb4e0ebe4fd714a75feecbf93173e337..6a843201029db453a074cb03c2e206006e1ed88b 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26cec63018de595b84ef340edcfcd711717efefaaeaa0464d343b0dced9e8d86 -size 492032 +oid sha256:a59b7ab6e4acd4cf5b7736e40154415fccfd5408bb7630b8eb630ebc546d8981 +size 410083 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png index 37ed5497a3348f7b3cc1dee514012cb2275dd087..b48838a198066bdc9fd5cc40f0cfa0947bca9c9b 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06360d96457b75e2a03b6429cf57e076c55c17a35c01c290afebaaa5efe83103 -size 721732 +oid sha256:b07677a64eea50b05603b3f74a21ebdb4052921ec83169a9a827fa4a709f8b2a +size 936574 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png index 89a5be29855d7a8f3cb34ef78f259b7d3db6cafc..a60ed9bf657ce27bf9898eec57116d5e3cacbf73 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a334ab6d9d68da8ff42c526d9647b7d49e8411d8e4105960021e25183a0ffbe -size 1314785 +oid sha256:e55f4a5d859b289b686c1d9d535dab3cf81ba02385d42d551e946788d8d20ab6 +size 1178948 diff --git 
a/images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png index 0cdc4ea30d39ac62369e1923e883fce9d2282d3c..6a218fff98e695b20ac0051fb3d2bb500f35a122 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:644e4ad0a920ee9333698ee6ac2011fe78fc0c7b5468e5df1241a28329fc3792 -size 241453 +oid sha256:099c5285ea7738f5c58bd94d5b36662ba02b750480e1ba36fd56f9b6b2e7a38c +size 215587 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png index a71791fedd0ed7a2fcdc2ae67ca524604472b286..a337e882b8b1541bd13f207dc89ba2aa0433397a 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c013e87f427cb8b3aa7da8150be26bbc98182e93ac31247bd0ff3a91a3bcb157 -size 613529 +oid sha256:f51b9c83402841fd6d6deb78016cafba1eb95f12f14f192c4325ceb321526507 +size 486961 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png index bcf12f6fe4a7b04a1d039c7ddc7c4dc693167a98..b5ce24f9986f894abee1da37c52240a8a46fd69d 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80b789fe1901b00feb101d553e4fa69fcb46866dd92eac37154a92db31d31419 -size 591712 +oid sha256:0cc3d1fda35a71084521b12ea2fe4c4fa8fa75661e3eaf927fdbebc9823113a5 +size 972749 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png index c0dcf4523d8ccebef34652d5fafe12c351747565..3695c084090a883072d118e9ca5372772bc0ab3d 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7752fc554b5591fad0b3a189dcbe9779d052d541b7642ac4729de2020d24c7e8 -size 1390905 +oid sha256:82293b50d8bed298e7812e65f8dc156621da644cc37e8b1ea7f55a03eb18fa7a +size 1596265 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png index 8d8a02607635a6142f190cf9fcce2adf509a0c1a..006c1dd0928a5053e513722049b457249efc9010 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e94bc19102df594674bcaeed6e5fd88846ea35f91615c0c69350b8fedb4482af -size 606617 +oid sha256:6164394198b974747ad288eb9bf22be3228d61bfb96e053f783d55559c5b8b47 +size 960603 diff --git 
a/images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png index f6813d78f0c563c45d58211cf6090e6d46d443c6..218b28fed170aeb6bc4c08d4768cf8930e7f58aa 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:daa7914b439dba89ac359df856a3ddaa9923a2aebb0e23bb832d6099cae673dc -size 503545 +oid sha256:0032f54a4a0c62e7488373b6d96a3063591da051da4c1db2e7b84d104fbb2314 +size 707179 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png index a256bee50555558ad729e7616433954a103d13e0..73360ea9addac9511384dae587e72ca28cdcc912 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a888d42b8b15dd1e90648fced27ecd0cc69f5da3f2fee9a00f2b2e35dfe24b7b -size 450152 +oid sha256:36c2e328830875538e4f2a7cd16226b9cda385a5dd0915a28874fdab6fa2830b +size 632286 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png index 6abcc8908607638dd8eba99128f2b54df866ef75..6eb380a75e38b61c93ec933056847a8ec1e4391b 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eafdf7fadc31131094bb97daa9c33808b9695fab8cccbacb98037d02054b51a6 -size 587303 +oid sha256:078ad1fc320e556f73d2ddb7c584f573bd0f99381712f9473d25e97bdbe51ede +size 561722 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png index 4c85d8e9b44f6a0a9402bccb6ad2a87f17994ad3..715dc30ce8a7870e48d2bf2c6c42cb15c79e1988 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa0b14ea34aa09fbb3f8d817d96dd0c369275e9d746346f056c763859c5d70a0 -size 589090 +oid sha256:abd955da5e0811c1f7075060349bc0d09d21cde5cbda9da5b945507ef6cd7bdd +size 605164 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png index cd34a4890b16b9afce06e15ebaf9f1242230328f..db1e9d415a9dbf0b462a5cdaaa4800ea1b789515 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca94896580f7910efbb6d98177a9ce98c3d520bcd082ccfbf8d214dd671951d2 -size 504857 +oid sha256:a7a4dd812a15ad7bc4f9abc2aa2831cd634ade4829e24c3695e43e7328aee687 +size 460718 diff --git 
a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png index fb57dfa7842a348825349af6726a97e0209d809c..c86ebba9a9e01daaab8726cdf80cf51d3b9b0972 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43993bc818fea17013c591f800c90c6e7f096682844d1ad2bc2677f326c80670 -size 1047388 +oid sha256:ec4e60716d573aa70dd96441a10fe98f89d246aa2d3c6422815a7f7edc2df307 +size 846348 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png index c483c14b99dda34ea116def281bc9f5ed46825b8..727e0ef376b49d829d68cad8e5e5121b8b08e214 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd5936be8c6b0c41cad6667dc51da3c6efc0120e3cda44cdfcd7b35da8ff7313 -size 566848 +oid sha256:892dd6d2da663cdd7c8b83d53a2654902d3f36acbbdb4767dc4c097d33467012 +size 670123 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png index 8a2139e353a70828b58fd8bf9c7cd20ca1c36c36..8982a6a1c541c4df03d8c2dd1dc394eaeefda222 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3ea87a8bd8cdffb32b5aa6bdc1e89d399ad05c51298a434e9e7d951703a982c -size 1221710 +oid sha256:0ae506bb49a964adab23ecc79da931b60da99b49f01f0f1f679ba164229f2a65 +size 1465550 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png index c44162ad6b0e4eea251826cb9db1cd7f93578c4a..0a4366f48dd904d8a1a6d0302f870786c2bd06c4 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:631f2781e07d931c0506d8779377fd501e52bd9ca84dac39655671a74a6694b1 -size 1112864 +oid sha256:a1154c391aa8c322c042424b2a6fd5cdcacb980b0cccc754e3271dec64fcd0c3 +size 1073815 diff --git a/images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png b/images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png index f3ce700d71ec3369bda2f4265b8abea3e14f4e77..58c3ca53f7c0399e4377682a1ab736c37d335d8f 100644 --- a/images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png +++ b/images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c03c02de92b073cc00aacf9e6ebac83228394a900d5f2f9c7bbd0ec8701d2f1 -size 610122 +oid sha256:8c6b139b5a40df777cdab2c36f9cba839b6ed264bbb9aac60ea258ae32d0b402 +size 880865 diff --git 
a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png index 7fbc57113b20f69221bb00b45f6b8efc142af188..973452c455b4cc68dee9b22bf42eab3892721f76 100644 --- a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png +++ b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a349729b86a4fa7681ea6bec2457396430eba428a40f6cd539c58e386f9126d -size 1435496 +oid sha256:0ccb867ed52a93e72d5bb7f9ad356a9b3d732a66218fab45b94997c982250347 +size 1449460 diff --git a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png index 2b59d93083605979e440dd1bcde6aab3a0f5b5b2..1f65d6dfed1110f3faf50840312fcf6e03d128ab 100644 --- a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png +++ b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04d03fd9c50a014a9729c1f77a8f24a8e2b7db51d578d8ccb9c8ba887a28d6bb -size 1434503 +oid sha256:2950b78c262a7cd00ae27cf8b89c1fda0638e58bddcdc4467524d44a61ee005c +size 1709998 diff --git a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png index 54b3d89d3ccb8b41fbb544ea8b1cbdd9a6088e3f..cf7f1de0038ddf91d90b21cfa94baf1aaf4d6cee 100644 --- a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png +++ b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:650eb4584b98628ffa58f2d720138b512d8e63064bde3d01b8a6d93e7252a384 -size 1435953 +oid sha256:806956eb83fbd90b56317d3a99aa3ebbb3bd80ea33ef4ed8e588abd0c9ca681d +size 1389672 diff --git a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png index 8ad284c69931c855949fa6aedc62ae1ab387c6cf..739c20866d634e2ad9a28a933617116457c5bf3d 100644 --- a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png +++ b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4828e248b086da43c91bf3da06757346b1a3d3c1ddd528dade5de95160db483b -size 1487925 +oid sha256:bdca2e031472b872fd4f96aa74115fb724001f70bc82fc7b9886e38f32a407c4 +size 1277380 diff --git a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png index 27bb04693a1de840e0636b1c19a299139a4fd57c..4c79e20e8eac0a06383198992a38d05a8b7ed5df 100644 --- a/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png +++ b/images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:562bc4fb24ef47c5e5302b97b98d36249a7ae69b172187919694f735420dda33 -size 1500874 +oid sha256:11d3ed2c7747090f4107209577f192d17758896e8f69935eeef0d72747566a26 +size 1769173 diff --git 
a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png index 4ec62ba79d30afe1c8453ced0f59888dcbc14764..36dac4359d96c3c77ae52ec93475216df07976ee 100644 --- a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png +++ b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5ce545a7cffe0aac504c3c2be604eddd56ed312b67b74241bab7b2e3736c4069 -size 1525787 +oid sha256:903c8cf4698b2d8a4ba7a771ac5eac62f6bdbf46b7b9d0a34f3c02c38ce8c68c +size 1383518 diff --git a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png index 5763679bb226e6bd3129ead06809859dd8534ca5..9139d0b3d16b8cefea7470574521521ad4b5da08 100644 --- a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png +++ b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8495afdb6020c6b0c3d33dcd1e2fe46b14ec7d9e857950379b14b7b142ff0148 -size 1733117 +oid sha256:7c430e1d124cd4bfe1c3639726fd94af1327493a20802176b34fe344084046eb +size 2276629 diff --git a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png index 46e21e55d61d45f9a0823d0ede21a972b2281004..70acd22f333434f67c18b8cab321e808f2db7dda 100644 --- a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png +++ b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a5c52baaef2916917891efe097a9f71108cc871e123090cb36c518ec4d8d1ab -size 1733830 +oid sha256:e8959cf9f8534b52afbedaa21c17ff52133d28af7981450b8630f9cac6651a68 +size 1504815 diff --git a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png index cdadc478536f53a44dfe1f4a8db19ab9754ff233..6292f1b56933dd6b28e7791b057f24324adaec74 100644 --- a/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png +++ b/images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e82deb721133387d31174e47bda768dd2badab4f58281085059fcb237ea788fe -size 1567005 +oid sha256:733c3f33173f0c2d318150f43a819c6117e2d1828a9f95abe14934207b3a9252 +size 2469220 diff --git a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png index f0342d45ff8f51fc529fc6e5b5435b563dfd338d..66d45201af1b10a07ae087f9bfdd53fd2cfd1ab4 100644 --- a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png +++ b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:894a0c57b6bd4cd35c1017f814a24e03e783db8a45826b85d92bb380d03bc475 -size 540480 +oid sha256:54b60db042b98985707d0c50d260c13ae6ec8b276499116aff4a737394dc94b9 +size 476017 diff --git 
a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png index 64397a7e508bf09fb0a4d46c5241573ff2a835f6..020eecbeb7b05549397498b491cfcd32881ee807 100644 --- a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png +++ b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:620d179c8c91144a2f8f553147ec7c057af1cb99420b4af4657118ba90fe820e -size 1265953 +oid sha256:25e3f29c3ed021ce878a01766c1d43ce1ed1f7dc2d4c318b62a708be3b0c909b +size 510982 diff --git a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png index be3e20d728c7b1ad2a991b035e43afc55dfc2f5e..c3c0c03226fbd44ed76596cd22dc95379725f3e0 100644 --- a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png +++ b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c610200c4af3d6252bf13120ee8f84af225aabe7167079d687ccf6e83f903cf5 -size 997344 +oid sha256:f9290d3b886feffdbe1cf8e3eaf23a849ecacdfccf69ed02940068e9a19f756e +size 1192156 diff --git a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png index 17535dfad9be1e330763011b8415ca8626379354..9fa4383e3e03cead079919c13665e466ae48b5a0 100644 --- a/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png +++ b/images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4561aec251bf7ae34a26b68e0dc21af76622ebc8eecd0c33921a8b0071f211f -size 346268 +oid sha256:0f137e1f96fca593da3c273c1125ce3d6efab5a7d1f3eb538d7f675a9a6295f9 +size 382018 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png index a22fa48bcf096782d24333d6326b7c9fc686ef70..1b36af440c3c7e9d627a67ee152fb87ac2f2629b 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66a173e27a3aa8950fbd93e07ab78fefc268807259ab34c2b1f74a24ef2343f8 -size 578273 +oid sha256:418f8abfdcc17e30d4546dbc84687b6cb9ec5d62a6b42f45136c3b00d274b84c +size 586701 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png index 11454d7c07881a22f53a4782028dd38bb51fc47e..1fbc7ef41df2f7ad08eff6d98d90d17ae039fcaa 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb217ebd901d85100592e117807d66acc19928f7a05d5bd7c1d390a98b97cfe8 -size 844852 +oid sha256:04f15f93c080b6cb5e0122a3389e5c3fff8068c24693493c8f5e35f45297941e +size 843224 diff --git 
a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png index 07bcc0a5bee42bb70d2850b046e283a0c76e411f..e7931436ab8bdf0a88a4fdd39f3b4540f024a0d2 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20ca745e5fabee96d5c66cd4b741933e955bbb647d4a42fdd342528a4530ad80 -size 1139407 +oid sha256:7a96daac6eb8ed4b7739ce049a8927570c2a98cb709d4050533a39ddc11a50e3 +size 1195103 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png index 681e1f52d056b9770d49192fc5d9b5943b1c1d19..1d3fdb059ee4b5489509c393ae8fca941444e3cf 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0d3f8906108caa43fa5c3cbdd91ac8acad5a18029664004588b597ae61a809a -size 583360 +oid sha256:2e36a654221d8989897e821ab43bc3b986eb0354b06eda3b8c3767b9d549249f +size 485169 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png index d4840881e5654d49ad4e4ae0bc46b7fa6454de0c..bfc4cde9b3126458111d39e99c03b2ceac6316ff 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6ff0c3a5e606a1f9b5806d58ccca0c905a5ffa69fdada47f6b07e691bfa07771 -size 607494 +oid sha256:0aa39bf6b166d2f4b664ac9f3b1aa3bc677530116aae908df4385cc90cc6f751 +size 426312 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png index 6d1575779881fbc6f9ef7465b8d90a342e3a06a8..327d717be50ac24f453c401cb6d193509e39fd41 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c27f01930499bd50113d1997fcb3c11dafccd7507793199233a495c8cb9304e -size 601155 +oid sha256:94e7e0b388eddf4a1dc94e664ccaa7f393a360b3961bdbef35ecf7da81007de3 +size 339758 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png index dc91390e27864f2a936349bf6c2a1283a8622046..9d24c784924ab79a89d8166b83d40a2912c91c26 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7edf31b2fba388ae8ced479341b9ce66414caabbb3f630f786491e3d9d7b7415 -size 1337772 +oid sha256:132d0d09f808f0d0fe1f66efb296133b68ef741a2de0a7070f60d484582436ac +size 1513424 diff --git 
a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png index f13c27771892dd00bcfcfe5bf015f4f2bf2bde56..068537361dd967fbeb4b9ceb1914c210d439c650 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03f58b02630fef5e68c6affddaf97197f3b06aa92b4493762e26d060fb9430df -size 1623337 +oid sha256:7cecffc61f22d15d8623973f5a01cce069f854551fbdaa38d7a99b0d5105cd57 +size 784828 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png index dd0d062155dd75a43839018a186d7ed8e7a7de29..bdf55411ecad0d12ad2a2c7d962106b11993806d 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c4958f01b01ae7d3303fe061e13c7c19ba8da2b9721e69101a62d907bce16dc -size 591506 +oid sha256:c14aef2fa70f1e46459f7c5005ee74d1d0fed0fe934d2e416232472dd1334a51 +size 637512 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png index affffb6dda1000a97a3e64e794ff5bfe72334322..bb258adbc42f37d14949b8b4472a655b0fdb3803 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a01f5096408530713ca378efeed59fb356ce7fc31b6e6b4af8c293f760c4b64a -size 1412045 +oid sha256:f791e68fb8c091fb83d9f10569bc0ee7305f7c8601e5cbd7d8af113363a1650c +size 1463000 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png index 1c966d39aa854b799f0f33b41b7db2e9716342b9..fa33c583e7754a94f1b782874e9409a4e21d6a1b 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:555502d0bb542059d1f122f459660d84633eaa2504211fa7c4b943c3521f54e1 -size 850259 +oid sha256:ad6b0755e305d2afd400e35895a55ea19a6973ea0ff397a6c574c369d7f38c08 +size 796233 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png index 6fa046ddb96b17942fbf15e6723f475868b123f7..f68af3d2dc334e64e493cf648782be1d968ddf29 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c6504b69f72df126ce2f20514c2c9e477d8955408915d6698b3322257a681b1 -size 1328403 +oid sha256:fa1261586799c0d5bd85ae3a18ac77fb7615f58e95f38a59702f928b7322a09e +size 982881 diff --git 
a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png index 533ef11633fac41aa5604c3973bb7b0ff4523bec..8fc364cf7f8cb68760f9ed61def5b044d52da420 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61c6c75a7d605b305905a48fbfd2f0396e91d9d6e8050c6354ae15d61e407947 -size 1420787 +oid sha256:e6d59071cea0762bf94fcea93ecc7c5b466e222e7d96d4dd28137a366e857ae1 +size 1075785 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png index 7c6b5fc2c5a02d9a6688c776b77eadb15b782ebb..66f21bb0c5f9475417d092ec47338fddd873229a 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22f6d2abab55ab84ae74589d37511362c523fe08208a851927aced7157427344 -size 596715 +oid sha256:c4df39eaee73261b70bbb6545403328e07416140a314852232272a6c91b39110 +size 563421 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png index 41219da8ce2e23ba333c11f919642977541a0a62..b0212cdc6b26c318f3dc461a5e7d84c383508331 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a856d2ee0cbea4a6bc53eaae427e9213e40a84bccfb683a7ffe5172d3f8b84ab -size 593433 +oid sha256:038396ccd941a8677eb8623a71f4e26fcfa62b81ee39fb92bbbdd5c61256f6ea +size 601516 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png index fbb10a69d0a2d62b39e1a07cf727c83adeb0f54b..ed096feedc39256d5d4852da54090808bf629797 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7609ba76e18ff00ebaf6dc95921615cd5cf7f2c1faa77c723d7337a5e2b4265 -size 577951 +oid sha256:a33825cfdc6b1b66e52a32a0cd1d19628c023c67635cb127c62be3d5312ee2fc +size 586382 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png index 3fb6fb92431438416507d36ddedc1a6dd22354ae..340913edce6d2c4d9d4cf70b00df2360d075da47 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf9e9ad1069c2318bb551ee908679d82a88049029217e882c3d606c6c93b24e5 -size 590647 +oid sha256:c123bfcf7e7567fabc55a0a446d193a4c2cd84d75b88807c344ab1328bf0203b +size 471251 diff --git 
a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png index cb47f6c3400cd56f58de917e0536defb9b92f481..a75e7fa1ec5bd0237eb3591ec7b19a700db77c46 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:050d5ce59219e4a18a8b83b686b616bfee7f95ddfc1e24783336ea8f5bc1d58d -size 1329967 +oid sha256:c4b00e54630d81505cfa9a48f00271cf837c69a8af43a12d80d244f0c96b1464 +size 1531805 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png index 1cb959a4e5551923607db45f2228e9d27117ef74..91266667866a12d54a7a3ca65d6000654ac0f1b0 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:052c252ebf84257735bb094b2c962370945de2f468d3ac247e4058704c580eb4 -size 1396344 +oid sha256:d71efbe38739c5952e28b4b912026cb8243989e28628a21d12b2ee27f6f9b410 +size 1468461 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png index e8117b64fd161d3d1b512cba784ddd7a788e6bfe..fb003c5135eef61a9a6fbd4237e7374d8a50c0a7 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec9fc445998cf96ca0ce4c9b032fe58a697ab8105fae5ab8f5c13974403848ae -size 580649 +oid sha256:214b553c26a8b8a57201408d9c5e42d30ae8a5ed06568ecf80774c1602f5a883 +size 589091 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png index d6a7bfde2f60023c47b8f5d1e443ae216a953472..0ed07c20ca80689e621ee036978412de3b126eea 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea0ba5544680aaad321f831130d15495168e5e7302d96a06682b2658174ac576 -size 577862 +oid sha256:0505905779252521979eca3b24d467485a86650e5c93057a3ec665eb7c2cc81b +size 586297 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png index 3ef6c2ae25367d2d29573a5826093974f1dcb0ef..cdd43caedad5a57dbfa22c10925c201d07ba995c 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c10245d6ec678ce99c109bb1700e4ad57d3e97763c3280dcc2a6959b9cd0730 -size 1376199 +oid sha256:f5c68ac1f02910fe7903a1ddfaa109c229329e736ace6091ba23e0c737017e04 +size 1312629 diff --git 
a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png index 6ab9bfe55e1a9cb28431fcdb49b916ea3f87475e..a0f0023514d999a2a9c88b954a0801f70e0f017c 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b23f58bc77ebc8f3cab37204842bbaf3202588be6422662ac5734655ff21d584 -size 1331557 +oid sha256:251b00c2f9e1a905b06d533262ef54d8f10f039f0ed66490d56b9864c9b6f359 +size 1510869 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png index cd34509997c51599c5cc1a71bf6c02e80234bb8a..338a7d0d7c3e75acbc5e81a5f997b265b7d485c5 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b0d954c8f69475abb15275a42b00cdc6cf80de508bd5abb4cbde5f64106875ea -size 593823 +oid sha256:1f7522e06d942d33b89aa46597098c9298b356f10592ba46d2f99415d17baffe +size 456823 diff --git a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png index f5b58e56e410138806d319a166bdc595c7e54d79..8fafee3b0c157be97d5436673baf7a15269f3491 100644 --- a/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png +++ b/images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c16b7274c87f83f0b72423948a1854aa435e9d5e4436807f616d7e519058d5be -size 601145 +oid sha256:a6fc303fedb16a8d44c6b194316b97335e91b0def27faa97dfe24c8d8c3df3fd +size 347993 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png index 7e86a2706990a569f1882d77102a0c4663856904..5d0eb503be903996564f91f75533dfaca8d2c5bc 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f13a2dc7d03552d5f4cda77c67a27b3066687e2437648cb78e4ad003d8317dd -size 1083230 +oid sha256:14448d7c4c3c85411366287fee4fec568da67b4acd48dd297553d4ab914b6d8e +size 934400 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png index 65b66a47f000204f35674c9d8e8379728f355464..d69b1167d98794d273d8ef9b8072f4eaf72f88d9 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b41e482afe96cb6e6ac281854777a659f1489f66e515db1a60a28f7b24e233a8 -size 1110539 +oid sha256:5926b0737f75046f12ad4b98ae98f5f176257ef9a5071f8d1f1007abfff8a592 +size 1263055 diff --git 
a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png index 9feb5208da5f7dc261d20e5c4cc86aa124fff99a..b9959646a8eb815e55289adf22fde7480bb38286 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b1337330bb91e13404301690f46b57a07b29931a8ebacb042dbe479dd73014dc -size 1298978 +oid sha256:e0e91de9341520fa1d1e5b63da7d4ce70eab4f1868b4ca0a044569f12b294a08 +size 1049979 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png index 8e642437d35e5030c0a63b7a91065fcff816d63b..77ad1830b98e1964f2a70ff73c241756c3c3d18f 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:488501220ccb6e71f635f4f0782f3062f0a1fce6ad1ab86adaf203e82f59c56a -size 1098411 +oid sha256:e9a7854a6d4721ffee794db8f8eced576fb4e2ed4ab71b02108708359ca9ad57 +size 1401547 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png index 00d8fcfeafe13f705d1c13f5bd847e52191f547a..cc36e3861fbc8f658d59a351868f8a9e0b5e71aa 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52f2d2ce416e06206a6c6cf1138b5d840824b3dc075a9083d50b4beaac13b940 -size 847998 +oid sha256:992db0132d036da4b868dde5f321808eba888b149afdc46afcffa3a3b5c13bea +size 1119170 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png index 33fd4031da968712423e63c436bc5d7c87c80ee6..9e0d350a78b2cd829ff2ed19b853e23c4885fce0 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb0067a3127c474a5f2d128366e9a018f75ed8873a703330073ee05314aca6d7 -size 839596 +oid sha256:36971226c2c29bd49760c28ce92c51d26bac1364379a09245d96583bbc28b4e5 +size 805413 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png index d9dc00d2a96de5d0243b0a59346be8a6572ceb9f..0822a4c67ae20acac42ddc53fad04ac8901f347b 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81189b148660c7cd25a44c18b9846a4bfcedcbd49234a88a37b9ea191121904a -size 690312 +oid sha256:7bf7f740d1869ec91603a44f3bdc4f6885ccfc8c64854362dbb607f977bb8a9f +size 887546 diff --git 
a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png index 68ef6748af13d2c49356756b04160f7461470a3f..ebcd69e223edb7c601dbe51b233bc875bd9c113a 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:30c43af92f7e9c083646ffbece641419b712733e9c72e9fbe5cade950c3df7be -size 1036461 +oid sha256:68b0fd237c5ae089d21d6d492c627a4a27af5045200c2fba9beccf1bb8371698 +size 1184022 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png index d082bf908b18f76cce84b092f78d452b21b47315..522357c7517d4f302b1606b9a4a6e21e09b8a393 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b976087201f225bbe6f974b3278f51d10f496c069eea142830147fb713d7f1c4 -size 715904 +oid sha256:c429fb2fedd5b9c0bdabc2403ac81d106cc4d24aacf0debdab5dd8976499cc77 +size 566464 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png index 3727d8d83b145d14c1619c9c2db633d668b11a72..2ad6d4a17dffd2ca7f92a28dcffa3b0800c5a58b 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c724c5c4d168d8c1e34dd435af2bee3b4717fe94ee79432ba7f10a02ac50cb1 -size 1086253 +oid sha256:fa94e514218a4f07e00796cebfcc79491d9b8946934cb6f57d1ce8b8070b072e +size 1458371 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png index 56324c37039086b59b9c452f89d7838bec215e99..7adf099a99f588a3d268d6d030602da61aac223d 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:946be0ddee783f46c1b68d2a4c055272bfbf2b304a242b2208f3e2552a474a09 -size 731605 +oid sha256:05781dd77a5168b16f81665c777d67695a43e5a2ee84e4cf58bd7fa8852daa1a +size 751954 diff --git a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png index cacec864bdd6667a14f360b406e490810684aa84..a7bf52d65f24a0664c8f023a70a46ab54f050ccf 100644 --- a/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png +++ b/images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9faef60f2fa111b69e74197a7bda7d1127f04312928d26aed262348ed0d07f11 -size 941833 +oid sha256:43591ebbfea4c68cffab30823e7f01edd092e5dc132bc834bdf1ac1af0a5090f +size 680445 diff --git 
a/images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png b/images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png index 03cf8cc69435637010462a1fd66fc9a9714d127a..87ab1506d1eba82468c526c36510e7d456683384 100644 --- a/images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png +++ b/images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:973ab30524e2046a1a674d1b05d1ceff0a66546665958dec5c0263cf18739306 -size 895580 +oid sha256:638f4acae3b26e078a97458b905d1b3e84eadde1200faaa024c21b02409ca8fc +size 709559 diff --git a/images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png b/images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png index e50a255c887fa909fa58ce170b02f618c8ea503b..23422f65356206173d1971919dedfd8153c20539 100644 --- a/images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png +++ b/images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6838903e5d541127d04d519845c9adf5e9b5dac9abc0731904b15761a8d44676 -size 681732 +oid sha256:689b0144f10bbbff16a033032af1972b4d3824d89f9247530790b551b2c0726e +size 732263 diff --git a/images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png b/images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png index 87505bb565595a458d38b778f679a26a643e1b10..1978949b4d493e90aca696fa61888519e84abefb 100644 --- a/images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png +++ b/images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c2579c474d47b40e9d174ef75630c1d29cc6ef3cc0d628edb7885e46ec83f9b -size 1130832 +oid sha256:9403805c00dee6eee634ecea24cb76475d331b6f22d19f273fa2907f2d235727 +size 1209363 diff --git a/images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png b/images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png index f640fe6fc5c7784bab6fa1aa99b6d4e925a5d70d..a8c69980ef7ffed84f613d031574e7af2b9c6198 100644 --- a/images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png +++ b/images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00c58a487085948d19507f69bfbcd79648289af8e737193d58b0d3459b6d80a7 -size 593975 +oid sha256:eaf30ab9e95a7360ae82baf504c13c8799ed24e33460032c4f348678febf7036 +size 681586 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png index 33375d0609a75dc16c1562322d789a658f04f4a6..c517f3583462e3a9aec48d8cdf96501d6702272f 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:777825d96538c25812adef855edfcec18ac229fbb68b00878a6b8ee86ea403f5 -size 696468 +oid sha256:0d74de66000ae9381cce51a3ce4deb7516937fa3b1f53f819a139904351af77b +size 1043485 diff --git 
a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png index 6f0f176a873ec6af1c59182436e3d19a27dd3b8c..882dc22e1790b4bd0d62581590fe802b5e168516 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ea900e168584b56c3debce1699306169905d8184b81f8e57f5639fb4168e14a3 -size 817006 +oid sha256:9b5aa44063e66bc9a4ddef4cc2081cdb975ff763c75072d7eb53f14607fac6fa +size 749725 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png index 206242b0b92ce5bb0386341d789986e0d216c52d..85c9dfcc525fb3712b5d912654aa3068aacaec81 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:199c958e130e51cdfa61161d96970d11c2f9093dce2f438e79c7c1e5de426029 -size 620411 +oid sha256:e5edbbd9cf5d44390c1ac74fb88558b836f7862c01e144460bf2476dc8ec1b61 +size 483513 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png index c85655203affe9db21b449e43d6491d5a55683aa..10dc0ac740cf272a982589e0724c4b366e390e58 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b198c038665ef7eb95f0233bc42d21b5b17a9ab350a236d6cebcccd329d442db -size 559700 +oid sha256:44abf5d37a60e6e96c59573851fd93a727b1528910aef52be4414a16edd032ee +size 549474 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png index 5eebc3e48719da853f6680082c8ade348a6467c6..9f8e402f4a6e28488984268d01e21ae844e591cd 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f1ea72a7d506178441ee5e7bb680c35f6dfe9612f38edad3f20d579bee54a875 -size 529774 +oid sha256:5dacd731031c432bb568415279f367c6231656db0482a0e79e653190d6b94f80 +size 428328 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png index b8afc834aa374a250e9601469f59abe7a4c08927..17e987a86326f859387d50c27a4513167d5993c3 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcb110cfb397f7b4e753a66fa1362ee734c06b7012797635c704fa5da6846ab0 -size 759148 +oid sha256:0ff4188a0554cf74deb1995639f89e293d96c36194da0c382662900811be38f0 +size 1032527 diff --git 
a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png index 8c94f5f6ed4e28c804aa1b5564ebcf395cefd46d..4416b64db3b2f98cd97a089d9beb28ae88e6b64a 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7762f9ea6a05c3ee1dc047357c5bf68b1022e8f68ffd5f4bfe2f541324424f5 -size 800222 +oid sha256:3a651151a7d98f5e3b808920a41d4ccd61c23a39a50f17ae5eba68ac502a8165 +size 940604 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png index 96b6a5a264f69b0a511309260af3ab966580c60c..d9864297793188372a9e95ddc6ac425923512ccb 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f309a95a10caf5cc876012cc5ce2e7a7a072e385d805f39c8722e065850d699 -size 753267 +oid sha256:73e3b9baef33d9f77ff59a3537bdb3dbbfe67a8a7cc3940c26ff6aa501fa8b94 +size 667702 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png index 9c942831e799ac8fc13fa7207c46e87a461ad193..4ec56b8602c0a3ea4cd299bfdcb7968ab8fdffde 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a14aef286c42f689c1435d9798b9f0c434468841112ba0606261591b428d583d -size 895629 +oid sha256:d0f72ca1251d02edf9142104f590e1afbc2d1a50ea2a07ec3d5edc82fadb5d19 +size 980189 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png index bb0262fc4807542f0d07597bb8a02ba196fff6f7..30ef5b54060ec773b30a428d0ae9cfc575972e68 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c43ddccd437fc6c4e7ce90b07e22fcd2de9354efa7d5f8cf64784add1a0777b -size 930895 +oid sha256:827d38a4d033c38c20d3b3bef754146c541917bce17ca886cd8ad4911fa2abe9 +size 1156751 diff --git a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png index 83f2b8b35e089ea9ebd1e9c42a329cb10b9de341..7e66a82c82e4c2f7753c91d4e19e52eff0247547 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8141d2c46327f4a2e95f2a73c2ecfb3f831f33169e3f854f8ccb3e90cd183a99 -size 751535 +oid sha256:8c67a76a6f1fbd81d2841d8b60e375f363f9bcb150e149ced391f8d836ee3cfb +size 872521 diff --git 
a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png index b0f5ef160241a0442f1abc8086cdd24308f57197..78be2395756f96fefe5ffa968aa34e59efb7161f 100644 --- a/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png +++ b/images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4cba609b3880769aea72ac9a3a5d1023196900a34291c50eaeefcd43086bfa0 -size 641078 +oid sha256:00082c95281e571a1750297e0299281d55962e42d86ddd04dce8c043ba8316f1 +size 690741 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png index 7d4bdc71315ca43d92556f75b55a68fb03694468..e3b581bb78cb59cc93e452bc252738e2945e40e6 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e5de52d2b2571f4bf46f12b5f02f54ee7842419a26d4be113b8fd4840928039f -size 806233 +oid sha256:3921f335b3b63dd75f12170d05b4bc4af2fda1209d7222e86e781ea3877b15b4 +size 761377 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png index 0719333b1bf33ce590fe63bd12045f567f292758..555f3625d6fdf5f671f5d38daeadddd7bb56c88d 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:242d05e9800bd2b91945c3e4e0bba165fcb046edb03e05626edf2325485e620d -size 749005 +oid sha256:d53f68e72c55e360c611fdef00f76c350958b88e3de6061cb601845c1449f792 +size 1453066 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png index 08c874267b59c8f95765e4d42f84bacabb6e28ff..e49e588537f5e92670edeba257419f7a23395297 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:439a5486d2387b196eb7bb4156748ced1279c2214ee724ceacbaf09773bd9091 -size 919547 +oid sha256:5835f74b6e9cafca15787593b9d6d88d349d27341db8e710024e86ca1ca0ce48 +size 964427 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png index d0cf89fe62941eaf994c03a00dcd7a8000488af6..bc8b4364e4a861a2e71af0f7ee9afec86ef6b3cd 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:336d6bf13e357f4222c1ab46c2ffed4ca0729da1eae94d64b74c67ba4f5a5918 -size 1425536 +oid sha256:2e53f455eecb3f2d4b9727ddf40485f2acf88faad35c5fc33192e24957c327d8 +size 1275678 diff --git 
a/images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png index 1e2d727a0506b6a87d8e556ddbfd686a15d64f0c..3c82f0c016175ace156fab47e2e3db6266be6cde 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:517cb568409122ce079584c2e36874d204d049ab335824b7acffd170ff931c21 -size 693080 +oid sha256:0b57f57b9f6a02ba030feafd5bdf48211c5ebaafc388c0ccff020937990ef4e3 +size 1056169 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png index 617fcd23a19924c4b1d4eb2b51cc6a8b9d108bbc..4d94d3a1f6a0525d721ea78534848c1c54f84bf3 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abb67aa8c758d1274d86822df616d7f775f9f68c6f0619de02cda6f44939422d -size 822370 +oid sha256:1803c602a5f7a6decba4eee205160ab0413bb10305ca1c07655d88fbe108a59e +size 1278761 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png index ea23cdef1690f9b20e0c6e2bf20a1b83c54fb58b..4c09b75b55791102289436f718d869231a9fcfb3 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d88ebc9964ab68c15fe944482a1115df89dd104d4a17ae7207113925b1dffe5 -size 1634810 +oid sha256:4c0d4d70361fa99f45bbb051299c71e915e8cd180df147dfe8ffacfd5dd362c8 +size 1040614 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png index 509c3d91a488499edc8a411f126a148a5da06f20..d6da4e232b60882a4f3b99fcff08f9b84a3185aa 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:920524ad7768e451819280be19291bdde3f9d79493e1b3a81efac00c73669eb5 -size 1509404 +oid sha256:6a7c76a65311d3dc37dc7eecb3c64d855357e496a7c1b77dd22af63604298df8 +size 1402230 diff --git a/images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png b/images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png index 148a0e6263c27abc6698e9087c576709f8b7f2af..256b45de0042695d37a426e72b815429ba8245a1 100644 --- a/images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png +++ b/images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1a8f249d08b4a942d4ff694e0d38fb1b62568ca914d2090fe2d463ad327a0678 -size 922864 +oid sha256:6aada93d5e30ae5dc2a7e0a86894a5d7a9d60306ee1ff7262922028c0651864b +size 1305388 diff --git 
a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png index 76de1d6d8da1b61c01ab32c0f9876dc98f1fbd05..27f1cc4690a8eb3fe3aa336011feea6988e2db0a 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c646b8dfdb7772773d2437a5009ab991979e908cf25fd20cbad9f8191deabd1b -size 385652 +oid sha256:2835454b282157ae13d36aacc58f5f34881c1085e7afc82416a97eac0338b37f +size 381909 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png index 1899262527cd21c04669721ac30dc689fe448a3b..02fc5904d4050b7eec32b0a58b231cce8ce578b3 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6246801bd42be5f5eaf3736a1c75f67c8025c9f43352eed2c998d3590d76f5fe -size 1005128 +oid sha256:c5a973d6dabff4a6944d75da7f969749f940e6844e557666be058b8a8e08168e +size 1828216 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png index e8fd833c113988029562fabd3926d4b46d11cfff..9005855fbf340ebec9a990337c0e40efb94c2226 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:185af9d241fdf5971fdab66b3156f933712d87d923c8d929d82c917840776fd3 -size 315637 +oid sha256:aa8fb28bd77b736a495c0091b397f758e23c5fce0082180ec1b96343cda78026 +size 319624 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png index 6e4fb32689332f10af2433c776ae9b6df4a36e2f..5bfe8b664d074bc5c703e6f4b2cd78e483b95fe7 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:282b45a73529bf9555bcc8c0d1484a78802ff121532b2e329363920f975a3a7b -size 332101 +oid sha256:fcf9fa6d9c9d29ba548839dedd848ae7575f77ede4ab20e6ec57e072fee676e1 +size 371101 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png index 83521820df9a4b005ed85c7c3e49c10a6d056f1c..ddd354861d72676619dc999171a916f828f4a3eb 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83ad716b09a305219655bf4acce9ed8dace47eb29f878e9b5d7ed5c71d807a8f -size 387658 +oid sha256:88ccc3397174258287fa6a34e0668c2307ba16bfa1e270e3239ae7f542bc3cd0 +size 646648 diff --git 
a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png index 26997b978b7d6c46f5505294444b565a39c2abe2..275198018aad7e0e36e7602842d8f47eaa03ea5e 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f27c90e550e195a8017e206fd002c5e1e9d5bc4b534fa3aaf685a0dd66ecd11 -size 2449200 +oid sha256:c9c55ced39898f41c8eb0185b97667e59c0fd51cd2f37569d263c7a5d03297f2 +size 217754 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png index 557218c83e51be02c24d01a8306d366c659f948b..9b9ca9188f9257887faa3575465b7493127017d2 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2f6ba925ef93c55c6f9f1cab2a5965deb424d4a13a066769e3ff3d68d273ab2 -size 475713 +oid sha256:3e190b43057f4d305534f2c74b1ba724a3693b24e7378a88f1a969d409fcddb3 +size 480108 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png index 14c07540e75a8f5a5442a833a56b259594c4d597..56ee6d9daa332915d5455f5311320e3b5c457b4a 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c250630ffe5ec24e0bd0d29f22396f86e69599247120ccba046da13623bde6b -size 1492092 +oid sha256:59207dc3bdc32660c7d76c0d13bbea448d09bb3023da4e3f2b04f71a83bc5cf8 +size 900991 diff --git a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png index ca78dfcbe3598b076d0d4d42d03982e708d7c41f..bbfe6714634fa15e8053f96ff2d28500306c459a 100644 --- a/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png +++ b/images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:526af176da4e7cc6fb03f41613b0c0c91aa078338d0d5dc5e8ddb75a769fec62 -size 340703 +oid sha256:fedb21994233e41d817c68caf21034e811b4d3f0bbc2af7c0db4fc6f7f0f14be +size 357459 diff --git a/images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png b/images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png index b301789d08b437247797017b4c26f4207e65e283..78aaf4419649b26c631c448b7bad16cefb10bd6c 100644 --- a/images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png +++ b/images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a05e66801dcca31aa8ce67730edc6ecdb4e2e15e17a18edd63fbee0dac563247 -size 1434528 +oid sha256:12e56a583b9e60585163e28da33a5b86115f8ceaf0da5dcd89fb39a6b8d5d8d7 +size 1698868 diff --git 
a/images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png b/images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png index 9f606e8d3b961a27f0b19f86613afbef767286b1..05283b038cbcfcfe33dd51a577af2362c552835e 100644 --- a/images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png +++ b/images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:504b83067988a58384bc13e95f40ad4427430df2720f34e766e3785ab9d5f06a -size 1178244 +oid sha256:6e435d551af0c43669a3d9baeaa9572811eda0a57a25283662484a254538b0b0 +size 950094 diff --git a/images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png b/images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png index 585023bdcd35cdab4b80f2b3267d8db383704e30..25ac8cb49a911cbbccd75c873f6c15e20648b458 100644 --- a/images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png +++ b/images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcdb0ee8a24545e7f3752c8b0bd5b8b0de805cde711582d9cb72df4219b9438c -size 1440750 +oid sha256:63501bf3b662e4c4c1c839728ec12275541317322b3d8d609b5ac784f2cac4df +size 1520297 diff --git a/images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png b/images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png index d1db4b833be22b6e2888d45670e59fc17669d391..c5f773269d1cd1c1ad2d48c5fdc1aeb8034bc844 100644 --- a/images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png +++ b/images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cef22e9cfa2f0aa1bc527ffb281ecee6171ad2972363188638360ae87fa454b -size 1436415 +oid sha256:fb1a1ccc52b4c4c43533911acfedd00b95d0929174ebe14d54ef1e7ce94565e8 +size 1732490 diff --git a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png index 0c075791414092f8421f466607b68bf413571245..45bac158db9f2c27250ab4488d90508536ccb3f5 100644 --- a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png +++ b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f3c62f754be315c88c81243880e50154f8b1600a2679bebaeca8adb22185fc9 -size 2980433 +oid sha256:9b486c16b47c6f62cebeb4337cda26b68f36f3c5fe9729b836c3c0429cac31f1 +size 1063640 diff --git a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png index 16b5af73ceda112c745e58f76f56c9c616c94922..ed7a530df8ea3be916b8dfcc0b92f0efb87545c0 100644 --- a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png +++ b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d767104d3711be1af29faa48d550ac87e5690901b95a43fccdf3b507e2a63c8 -size 740878 +oid sha256:e8619e7906c18723b049540fd8d436178f372088c163fb3ee1815eb28de7a8bf +size 549788 diff --git 
a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png index f8ad1bfb21af51782c92e1035d779d0cce9547f0..e90620b7ffa884437332a57c597d89c113e74410 100644 --- a/images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png +++ b/images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c61b94f1c7ca8ea9777d16553c37b3ddcf5318715e1efb3c7d302b1c3d097fb0 -size 1883462 +oid sha256:35dc05ee051bf2665e23875ad3db2d8231dc38cd8ef4d8e31f55f2b404881549 +size 968997 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png index 3d222fb9c8daa726258f96d00cb4fcf43bbf2c2d..96275f7267d2e28352e7390751317cc9d009735a 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a127c1378fff15a8be6c78a86c46c10f8b8980eee1a0f555baa48e02d61c456 -size 1377743 +oid sha256:01092c2e492988df66493d79c4e1aa765273dda68a95b285be0dd3a3f708d96e +size 209987 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png index 4b3a033177c2816f40e982224055b5e92c2efec0..c4aac2de0ad978ab7d0601cd96629c7c83ffe92d 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:31510408d83800b2f3e08a9fdb7a2616a47f4f8ee10d89e49375e0918a64e6a9 -size 809256 +oid sha256:c0bad586429d8045231bba0a5269522f6d45cb1743a0dfcddf4083492062e72a +size 631863 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png index 20c373a461bf99f8d235878f1a0426cc811e340b..f376172b176599d84e6bec94b98f574c4e71cf5d 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf4c5061001f571979970e190d4e3053efb72410f2213da84f15808b193afa96 -size 649371 +oid sha256:a6a42407edc3df79e0ea76b12cfaa4d14429264c3c5ef7ce4cf462587bd482d7 +size 771192 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png index 6f6e1da243e3eba29834d805afb72dfa35050697..0df3de7100859c594b951a732d626ded1637bdde 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74ba79bd879a022b74e80739dbb78a9d9c9f39b32bb999274cb3bec9fcab212f -size 605943 +oid sha256:3895af59791ca089034e56f1566719c44c2c32cf7e2907200c1bc819b7ce238a +size 645165 diff --git 
a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png index d22b3cafeec53e20a2341b39f9ae70002fffad92..947fa1c06961afd844ca9c7b9e082f1c58e3d63f 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c00a4e5a87a62f66ebfab4c8deac3280d025446308ee4e264890c4ee60924e92 -size 709445 +oid sha256:5fe320951a0c0fdcd5d9097f2ca9ba1402d8ff7e4ea245c253bc4c39ba893a34 +size 512977 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png index 5d4dac21d36c9200b47060fa34793e3696ca5589..ad09c3e73306fdea3b176d04c8baa7800d8239e4 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db0ffe68bbd821672f7ddb4dd5a5fb8fe7a903ded861d8dab87f17c507ec4e03 -size 704379 +oid sha256:6504e3ed15de4aa24bb1c3dc8953d13f9731b6130cfee3a416958bf279c161cb +size 554979 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png index 5a47ef4826f82cb8db7a8f871ff194aeb735f9ca..cbcc6993f61b60fc6ad5fdf304945cb819bf3200 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fdc5e126c2bf82bef3bee0e3567682e97c5773671a85b5b7d38e21c8b9f7aa56 -size 539779 +oid sha256:b92d2453b8c15b5a44cd6c5717e54cd4e926312d49cd1217feb612275ff5cd49 +size 575199 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png index c4a364565273e629ed71811f95164122c27883db..3d8869dd9397168fdcacbcb1502c2a1863c32f18 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bb16a879ae4ba4f775cc36c9331ed9df4b1e00c76a72a82bd2adf4c3ea68d2b4 -size 723935 +oid sha256:41d07bdffa37a9e9258a226c2c7c5df15d9d0ee2003700e88a727b52d22ae156 +size 761069 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png index 3b31f826304fdd4d27f51c36378b36ed452ae706..a3e2b3637885cfa098e0419725c3ca6dc399ec6f 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b8588ed399cbc442c8edb0d461630ed042445c07e159226c695c0f097739658 -size 955446 +oid sha256:fa7103cef9b79b9b04640d85c505340cab346aee2aefba049d402f6afbe8e24f +size 1255328 diff --git 
a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png index 13a95adce5116479c56c9fe185ff073db32ea475..6b5e9c72bada19429d3a84382557ee4b8ace0b2b 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:77f25db3c1046eafeaae142d663c6e9fd8b4c0bc3e8afdbe712e4970c65ecb6b -size 617769 +oid sha256:af406ba4dea6295986091547e1013221b10b5943ad68ca748679155dbc50a7b8 +size 648510 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png index 3b31f826304fdd4d27f51c36378b36ed452ae706..e662271bbc157e3d17648d81af44bd628c72c70d 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b8588ed399cbc442c8edb0d461630ed042445c07e159226c695c0f097739658 -size 955446 +oid sha256:1d42b307a6378101a6f889319454ad76ab902efe7447c014711e5f4b0e453347 +size 1255247 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png index a95394f14d9d8349374e13a9556c6946931e45fc..e2b32c5583556b4d7f2c615b49f79d1619d573a9 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:43562fc23a8278f83a8e29bda825443f25d7a0a3cdcb7afed079345d9d2de533 -size 707065 +oid sha256:3f211ea42a50d80d54e117ff4c5159b31a5eb0b0eab806b879bf4ae112dc3dac +size 533295 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png index 8ad7bd4eae155f0cbd4a542401ba3882e60bd5dc..db31ba9ace5dbd15a339dd21b94e3b7643123cdc 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec8b4750d0369973532c8994f27f6a73866d6dc720397c657a213d537dbe9175 -size 1052491 +oid sha256:db7e0949c8bbb8f69b82d95db0ed621315b05ee53161a4f0905fc6dc0af95b6d +size 793211 diff --git a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png index f1614a3318b4339600290430d495fa662f36b2bd..af2d82b8923757aec3542e9c6b9c309ddd6ec3e2 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3972c72ac2e56702aa0791cf68a7c51d9b268fd4fae6213a514c09e2b760f2d -size 646229 +oid sha256:e44d657257b5396d573a6923118dc839cc3e1ed799bc9693f1c892b52449d8ee +size 701228 diff --git 
a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png index 580b2ba2ec61b6762bdca4452b3ee82655d8516a..9bcbb20cf3265acaca2dcec76a8b081bbce1fdb3 100644 --- a/images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png +++ b/images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e32c47aa4c100737411d748244fa40160b00f8154e2a073e3b1d927ba44143c5 -size 1149540 +oid sha256:8c0eec376dba9e3a6a988d6e648033c2d39b4a8655a44e8a691dc3a73c43d854 +size 516894 diff --git a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png index 172efb0685fc5ba30a42c5aa2daf76e6f4b7c937..6ba85b02ac1d2df2721b7ef155b7fda755e0a086 100644 --- a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png +++ b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93288fa5eed638b517a7ec5a2e6ca85eea5e990a54b643c040d24644a0c45deb -size 1442264 +oid sha256:8c2714294a49ec83a5cf86c23c23f7bb24e17597668644d63bff14352ad94b6c +size 895162 diff --git a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png index c449abdcf95d9eb69b2f67c9f65ac18b639be28a..1829462095ec1c75a20b6111c4212c231361b282 100644 --- a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png +++ b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec7a6c00cd795d346776d33f60936b5892fb9c78bddc3924011797c8e57fb5d0 -size 655110 +oid sha256:405467265853e64534058ec6afa37df50b1a797da8af8d4cfc1e5227d1c7dc2d +size 488119 diff --git a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png index 68c1d2b2c9a1a3862546db8a20f7ca8a149c40a3..b82d7e4f5f180939c8fff42e60d6256191126b97 100644 --- a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png +++ b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b4f4ef515f2594d106102536f8cd1f0f6f435ec9d77cbc8d0e8ffc087e34d3e -size 1524157 +oid sha256:0efc4514772e7110f5ae473c3ee0cde65a5035f45a861a7f0b8ca18023e8e45a +size 1387025 diff --git a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png index 7dde76a307cc48db89b749f9ce014c5663f3f039..22d510d60bf1c725f5fc844cc417f1a177634c1d 100644 --- a/images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png +++ b/images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6e5ca4cba44024f8fab2daa3d7f878f58b328b03b430697ded3654baf1dc0efc -size 451016 +oid sha256:03a6d060f46e4cd9061a6ed47334e7da74cc049a4b1c5ca47ef9c990146b7a83 +size 350630 diff --git 
a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png index 89a8edfda71c69aa19718a404a106939aa192d1b..9e58ab773b9feef1d818d41849e7bbd45fa271e2 100644 --- a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png +++ b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb389a0e7002014121a219695ff7801326dd0364cbab5fa4705e876371fb6a3e -size 1417186 +oid sha256:d0e71a99f595d5eaf778188c131358d9c19b2a6f26470d93d1c30af5f89e546a +size 1184937 diff --git a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png index bfea86ee2a6ea030f971911841f1d6e8caa96bca..842aba85108c2309b74a7c5aefa0536c89b97d04 100644 --- a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png +++ b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:439a568a67d9d0c46eb889ac9d569ca35fefdb4d1c706a47017695a0d4d054aa -size 1232945 +oid sha256:427674df5274145053ea4bc22a067c4f5d1a7438f2be7513ff5f050a75deb829 +size 561650 diff --git a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png index 01ce099f10ce51319c74bcdfa25e8f24634cefcd..045ed9fa4a7af373b6279682e0f88c6866306274 100644 --- a/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png +++ b/images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d266cd0119312e48a1f6aadaa5415516363bf9c70a17e22f14714c77e1b0bbb -size 1314026 +oid sha256:7599735f656385207710748e9270cb1c70cdc146c4768f44d4f4a41987e77b8b +size 740219 diff --git a/images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png b/images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png index ef24595cce84779c235e1ca79787f8bb476c0dbf..b34e7467d7a76ecac6b4ac2391975552f077fcad 100644 --- a/images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png +++ b/images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db22c55de5e8e5510ccc1d061dc5c0f2b656ea3aeb9bf6f7630c51ad8d7120ce -size 1584941 +oid sha256:685652d5827171ce34369a613cbc902e1190245236bc5327786c2b5999ec8536 +size 1791740 diff --git a/images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png b/images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png index 91d0d4915c8261b22c1a2a26a59b595d838f1731..c090c2c0faaef918b53c762d2d48fd4f4630a888 100644 --- a/images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png +++ b/images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c654eea209c6a646daddb8dd546664c204552c4006fa8ec21192a0b888d290a7 -size 1247840 +oid sha256:cd5d1a02395c40ff459e006d6de2f6f4b1999cdfd3d59f711080fcc531637f23 +size 1374533 diff --git 
a/images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png b/images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png index 7d416fa9731926c9535510c9c2be8d289d95af05..df47ceb71a61bb8b75e912a3315aad5ce1a318e7 100644 --- a/images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png +++ b/images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7fe925dd2bfd9a50a7958ac0a0f7e7a1daa9aee5dff3ce156609b5fb9c0ed19c -size 1267150 +oid sha256:f8eacaca1ba7b39e5466dec4603446126ff2ee8044dcfeb6257b5902932ea413 +size 882408 diff --git a/images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png b/images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png index 314e7f4e1829b6e6ed0a20f71c0fdfb44c1e4ad9..579e948be878da64ffb276ae1ddd2016a1dbc8ab 100644 --- a/images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png +++ b/images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d3b500a94b0633227773d424d3b844495ed3db1a579edea315b9aa1c96f8f70 -size 1207183 +oid sha256:88188212a1c734972d62f8fe41c5e346ec7553395edee73245d59fa6ad16cfd0 +size 1776276 diff --git a/images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png b/images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png index 90661c329bc24a461786d500f60a0793c02c3d67..4f6038f1da2c2a800a9d5dacaac22667deaf25df 100644 --- a/images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png +++ b/images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d3020d517527fd2f97e3c0407bb9cee4ce95edcfff4ec1833c41e33471c9d58 -size 1100994 +oid sha256:64e0d931fdf89117e5d13ae281dd05ec2a14b3501b6491dac72012bab809f693 +size 1674383 diff --git a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png index a642b813561e075c2fd2f92f795e3f2a69a25c65..4af2d966988e7d4e8700e060685e85eef202a88b 100644 --- a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png +++ b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0293f9815f1cad76a81583a7aa0043358441b17efec868999e1359718aab6ef5 -size 374139 +oid sha256:3135319d8e0af35260cef2e3f23806614f38083974e529ac7fce21aee5c8dee3 +size 570203 diff --git a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png index 3ce645ffa6b9335d8f956b2dd18dd4ba01769824..cc580e08bbd31d186096df360f311fa51151a999 100644 --- a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png +++ b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e837bb320cadde1b62017246597ac93cf53de31b4daff2bd2d4869f54c8f1e86 -size 974068 +oid sha256:ad807de39f436d841c2ca3554edcf171f2642c4dce40bc196fa33504a695317f +size 842096 diff --git 
a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png index 912f9fed24dd9d30390936f96d9563f16d5a37a2..06c632ca59ffbd40f05c61e1645cd9f7e5393763 100644 --- a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png +++ b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:92fc67976e45e597225e06ad4729167ef2c99fd5c40a99d0dc3a2c61bb01099a -size 405971 +oid sha256:3b768c07374f8474661f3dd4480c4a0a4fe1209d6fd55a4cb25ac7e892421301 +size 845915 diff --git a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png index 7ff3b9eb8be105807e5b71411fca77e74eafd297..daa7c397076f5eab248a44dfb6fa6e21d4e72250 100644 --- a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png +++ b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bc49d2e29c1c74f23efe8b253c9d607433da2219ddef4b4466e61494ff05140 -size 1776835 +oid sha256:073bd1cde3dbf1cb7cb923639a1e6b11db23e608cc12b4fb91f9d253fbfab1fb +size 1754968 diff --git a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png index 1437e4aa6e05f68020602390fab8c641e0af81be..a5cd5fd39fdfe88c5df64b03eef3c4a095749d26 100644 --- a/images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png +++ b/images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63d48d986b28cb7e109cf678bdefea4f272dc2a558169981b00ad58959d9ba75 -size 1697615 +oid sha256:1dab8bcffc60a25c6f751b74e4b3c8ceab652d3e5899ae3eb4f93c5ca820cd23 +size 1443517 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png index c4f71be4ee4cfebbc5b1a75e27c0061981528bd8..8942181834edc4bdcd1b4c02398b47fc22208eb5 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bec53bf43326b836c35c22636649c316dd77f2a1f1da9f8467c26ab1d8361d2 -size 1424113 +oid sha256:213f8c104279165534dfe149509fd191efcd2df13ab6928ff8897dd62d46c80b +size 1201791 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png index b3d6fc09376dcf6a33c836bf2aaa0f3a96f178cb..fbd6659af4ecb0a1b34fdedc31aa960403bf92e0 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3342565390e0f5578be4ab4cf1abc648cc0a3be9f7a57daf098cfd97e36245d1 -size 1412174 +oid sha256:f95c1e4d392c08447da1acc32dfac6160d38f79725639340105d9df962232f82 +size 1501614 diff --git 
a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png index 99743732498bea82fc024613776b9a9c536cc5e7..d2d3d00a20e9e918adcb22aa77f5ee309d764e4a 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:afa22e9b7b1f9e2b3a1214c0c69025e5fa9cf24f7e1c91393765d8cc1dcde7d3 -size 1345057 +oid sha256:7f522e9b7df865d69c39ea12176fc2023fc547bce8471f8c7261bd6a2819b6c5 +size 1420601 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png index d86e319f9795ece3ffbdc68c11bd440ca087e18f..af2156e10a792fcf92c262da609e0566f7b05ce1 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c0fb0104f41aee190d027c9817210ea11192bad106bfe1814dc0dd38429a22b -size 1371014 +oid sha256:3208bd6eec181934ea6c8600d1514c119dc1d56456df8bda718783cbcda77866 +size 1429357 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png index 70ff629508e44d26685d52595946276a187414d7..7bd3487e39e2ce519ca2dc488ba148827c91de5b 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:33b610a2906a0726709fd6051b7a18f424e4493d049d155a602c8c65111c6ede -size 1437051 +oid sha256:b4b0a63285b06906fad2273ff37c2dd1f2aec9be49f11dd4b036134df8b732ae +size 1291267 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png index ae33803af16ea6b44b1cf04fffb04ae2aa59083c..6dfb2fb128a7d6998d023bc3ad02f07de1e0e91e 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e59d546f54c5b45132f0993506d03d5ce9cb3142e08cc3a87444ec3d3552ae31 -size 1407512 +oid sha256:acedeecbcb6c2c55faf671029fb39119071be67f80248b5aa5be80c1362bb883 +size 1313907 diff --git a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png index 2744a0aa4d7335a10b829568829e108a11e1aa76..4dde1ad1566fee20c10228dbce9266e1fa6c7335 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61e4d022bf754200ead97e540ac4e49eac488f413cbef8864b92579c9f5a9d66 -size 1424052 +oid sha256:0e61919ff935529fd61fc762a61df841cc53bcb2f44d2c2f2b9753a3187053c6 +size 1512849 diff --git 
a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png index 8e4070b84f98286d99bd988e1af47ac86a37dea1..4c55a96b1794e32c367cbe7b7f4cc9687981be48 100644 --- a/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png +++ b/images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:72b504b2ce9ba5e5b1b824d1d0df2d526598a89bf23c7c6e28a1fb0933ed072f -size 1424236 +oid sha256:24cc463aff2405a6c12294672f963383b7796ad1c017fa70d18518884dd7a7d8 +size 1423977 diff --git a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png index 6d36257dbf5895f2ca73f5d34a0b1768c8ae3779..bd7377908cf83f8f54302f76e7a686a23578e6fc 100644 --- a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png +++ b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e427ee6b1c6652640e9aefc03b34847f39bcb6f283bd851a811bda07a785ee1a -size 983753 +oid sha256:c72e63eb9b5a5008ca549b06f7ba69863eb984fbd0c4f309297f6bde5a72150b +size 1110229 diff --git a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png index 7c687dd21eab9e5d62d63ae495e15231d079d4d6..66f1af50b5da3da19834a7b467aca7172cd006fa 100644 --- a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png +++ b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6ce79f6847709efddbee1774e71ddb2e60ad470dd9b99d3296fe80e102fd409 -size 929930 +oid sha256:40aa73d3911c0f0988be825cbe80168d216fcf84205f712a86a6e04234281794 +size 929365 diff --git a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png index 6ae4225ab4f07437c4df87ed1a6d7aab670fa238..df1a683b5395a5cc588e99d04d020918d790993a 100644 --- a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png +++ b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f1a18b5323c7e6fbbe9fff29cf96d6cb14d749875131734af83bbc5907c5a0a -size 1086175 +oid sha256:b9753da38421e362584d15a92fa0b3533c02ea1550e2626eadf868ffa2902e0f +size 920429 diff --git a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png index 28a680a40319b3e6e8581e2ff1b167979709f371..1fd10aa2671de32ce2a021f3c2e2b5c56bae1b49 100644 --- a/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png +++ b/images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5a9cd743beb3f69123b1edfac245c78efa1516a750499a96e27c52e1a1044652 -size 919822 +oid sha256:8ebe0591379b0e922a2621e71560e96d187011d0973f294d2f5ee51efaee854c +size 1100463 diff --git 
a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png index 31a39ff89ac71cc389f43d88f596b4f0d021deab..ab28262846e40338abf8ddd682ea70dc6c398991 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b036d3923bf603c927ac739e1a067392ea5b2f175bb1795f3e123ad35979b167 -size 934560 +oid sha256:ddf314c2c72f167082baea62cd764fa027e133839d7772f86f9633a91750f57e +size 913301 diff --git a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png index cbfd7a7f1060944d19a5c0152184c529acb79dd5..de4bc106b7492e210eba406e3a0596ac3f2a844c 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d30d9a665d3c53a377af4f7eedd25042c3eea9111c1e8f89d7827a0823ccd550 -size 1245625 +oid sha256:f60da9c8373c53013c13b2404dc8fc5e2fe0c7e842bb11511cef4d7b3b398453 +size 343212 diff --git a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png index b45354e0b78331ec01707b64a0ca0e876fff5ec1..3aaf0e56106bd968f08d0a003130e75e2efb48c4 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:504db95968953782f08647cab65d71359a75bb5b6170940564b1bd302bc839e6 -size 1339609 +oid sha256:c265dc544cf0af912ed1082d247ca2b334b28e31a0ca78ada5bec6a0892f74f8 +size 617403 diff --git a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png index 7f493d6608c72178abddd20f5acc3d880635a828..0a5206435665b3feff423dcae20d18440de50f2b 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:27580a852b006e410c4ffd56246efdf7f34c9c73ad4bcd21a4eba8d231ec2a78 -size 706380 +oid sha256:7429604d0cff9d51e903446b13c88e958b3382a0158a1d49516013be68bdc275 +size 753455 diff --git a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png index 2b58e89f7a5fd6ed5eab24b5b61276069c74322c..98c405ac8a33cc26d2791f0ad535be3cc02ec2ad 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:307191481658c8f5f93afe91eb014cf7f464b1557b8528e1024f80d0d68e2b72 -size 1114340 +oid sha256:80b4f118e55023d5585e03ae8718da97ffc1eaa1accf50bb68b0e5e133bc578e +size 1015942 diff --git 
a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png index 650169f351e4b269748a491399b555a8ab8b3305..c1d7efd8547c068b40510eaae06002fc5c085ef7 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d336b3f6b2165a2b1dddaefd521b7350ad3ec7770ac5dc42b432fdfb8c3ba49 -size 1018403 +oid sha256:3c542ad0960b432ec4cc99bf3984fe45ebcab67b52206bac38c18393e40d8c80 +size 1019559 diff --git a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png index d718632c6525badffdc3bb3f11d1ad16df321b9a..7c7c9ff796b1a2095421d0666266c2f97f685f18 100644 --- a/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png +++ b/images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:119b484921fc2f86f7811a567af7ba19b518951a4c261876c43c35bcb9b11d37 -size 1031421 +oid sha256:6f84d249144efff4b76a213f5aacb92bbd845cefad4b47be5d79d482d58a871b +size 912381 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png index 3da5b1a2c3a2e29cb75c7aca1b6ea040dbf8caf5..999e026ef64aa15adfe1ca8a3b3db643f1f724b4 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:acd5e244efdd98bf74239086e274e126e7edef1ff72f0c895fa26792d5c3173e -size 1062195 +oid sha256:d39a6d19ce8005c62ed1cdd12136ddfb87f2cb6ce4e5c289e3618c5ae9e4db2a +size 945791 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png index ee2bd867e385c93bc1f7f83331ef335222348ab2..62a7cf4e4799fadd2257c0c6d0f18200fc68c772 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64c1b3270d6a7e4bb07901dafa1ec51ee7098e63812a9f420938e9181a41864d -size 490212 +oid sha256:c598cb43ad0aaea03b9d81f2e861bf69dcc4713138ce3c972522b1ce6f45e55a +size 537789 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png index 1b8b80cda9e02ffc88f03bc4e7198537e605d0de..f4abcb1a246715a54fc821088b01d7e3a198102b 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7dbf2d4da4b84c2202ab0f7f15ef0b7d889d2f60ecdc589a7478914a09f70015 -size 776338 +oid sha256:7bf64d29d9065050d956c6d317c3867ec4031a4ee0c2886ea01dc72102172f5c +size 546293 diff --git 
a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png index d79f2b0b7a132c8b9327c75b04168c7577393221..f335b69e98f7a5b54185c28255b841015627d299 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8712b5db00c8464467cc235542bfe9a9752495ab26ed4bb44609747b1a3425f -size 1086947 +oid sha256:f5cf72790bee9413e12565288b2e6b017c22c9ab7a5d7e8a89f992ebbd77a107 +size 1766342 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png index e33d32d12da9102935f5f0338b813bf794aca1b0..09cffc778ec16d3d17ac17f1e78c12df36eb64be 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa3b474596267a94e0ef3951bf4979c4b1e23bb515a0fde0e87791c12d54032f -size 943108 +oid sha256:6bdc1cdbae1847d20f3193a6630685689ca292f10c2cb87ad541eb40264ae94a +size 731452 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png index 8deb64391eda888be8f7480fb0e4ac5303725135..a517f6bc71ddf8635a40e4c11649a38ed1983172 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ca4897b763c74f78e98a01e700964e25e0c1da48543da3e134e5bfceb7311c94 -size 755580 +oid sha256:1e1f6630aa92d03107b1fef51f8c26d55da8c34b2128ac3559a8ba7cc88614c6 +size 769330 diff --git a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png index 6d1497a7d300b87324b375c02685d0f53fccc267..02524fe3dcbaa9e17abf7b70633f3202aefb373c 100644 --- a/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png +++ b/images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:283e197f625b2e1ffcb86f37780c6e261428f3e2e00506fe4498625b81108aae -size 1288892 +oid sha256:ca64c67071789fd0abc5ad93051c19b70a8b78428934a0608d5f6769c89c080c +size 1200625 diff --git a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png index 78632127113bcbcfa707f9afd2c747af57c8ccb4..e949252df2133f2e2ba33a23d4dd331b31883b04 100644 --- a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png +++ b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3364e0c06941060bbd0c230c0294db858d5262d651210b5c11425b51510908be -size 1411029 +oid sha256:5a7d4ed74678fd80faf7b17bfe9ec6352b8c447519d5e9e8562628cb38eb675b +size 1167241 diff --git 
a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png index 8cee423972c344c6fad6096355f0c127bc044c8d..52fb53e794f6e27bfb1a6c196c6b987e03a341aa 100644 --- a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png +++ b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:853b95bedafb3f3e7e213f07cfc1a188fde3fa8a527de0e3b9f3f1499bdb4f8d -size 1188973 +oid sha256:7712f1fff28af52c2b80355ac428d82c49fd45d3ccc447d83f88dbb9cfb442f0 +size 1476795 diff --git a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png index b0aabbf8796299f1c57908eb607747ec2e7dda2b..dacda300c5ebb17fae499329ebad19b84933d131 100644 --- a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png +++ b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b083214e1927ddf31be4a1625a842a16bb65d7dac925f121ec744cc37264f2c0 -size 1082458 +oid sha256:9a8321aed6355d797e9e7b475d0393884ba7bd50faa70f5328e3c12c9747c87d +size 1078612 diff --git a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png index 742b49b8e34166b6810b6c0e34f426b77dbab71f..4fb74566cfc79f9938a30e14b6d8b3b622939c20 100644 --- a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png +++ b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ddf26b62bd07a824e447d633eca28d34ac26821766910c5ef353eca0d6bc0274 -size 1755587 +oid sha256:54723c994c7577df4d76d86fe6a6000ceefa376230418ed3c6a89ef3c4b1d56d +size 1630974 diff --git a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png index e50d25f99a1b7c4d4faef0a90b5d68a72d552d2b..6b15ee5bd8db202fb7dd3506281affff89781b22 100644 --- a/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png +++ b/images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed2e30945e2a97a09e3adbab3ee53a3d43cb7b77e269e2dcd5a57656d9a9c232 -size 1115274 +oid sha256:5e41894f3f95f772f9ab0756e3cdba7f3f42ba07fbb9c9775ad268ca35ea4f2d +size 1096867 diff --git a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png index 08bafd15cc43ddc7863e83e8211872540a05782d..0a96aee7d1242f7a331d096bf848a67c4ccb71f3 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7caabfa0dce867e9d85a3f47f4b793000abb02e9c340854fef959fe05bc21857 -size 2440588 +oid sha256:c8d107fcce1a231c2e10ddcb65f1dcd17728bebc47b1421260b7c50a0ddf444b +size 2465262 diff --git 
a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png index bfeb740fe83b3e9e20352a6a0f0c39bad326cf0c..d44689cfc5895b1088f0a9bbfecd233ac91e7875 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7c1da9b90ec99da118805c7fe97cd80dfa06fdf25cd59d2543c57dd30d7218c -size 1451143 +oid sha256:94a33d2e814db7e4bccc83c8c6ddd34ca18d7b00fc7a5a5661066a0577533329 +size 1372079 diff --git a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png index ec137a0d6fe5bb95fe48b90317201e19645f7ee6..134f63f4effbf1c807516dafe0e3b480039283ce 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:934eeddbe677d588634229564343d28d19d765cde9276d9848d705b9f31afa00 -size 1293306 +oid sha256:77ecddc1428a5480d92339033ea0936253337652819fcaf51321a3c73800ba56 +size 1484974 diff --git a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png index 49ff8fcf3d73e70ff63b0001a76bcc22ddda4b82..9eb60e3e4069a716fcb25ba599484041d07879d5 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:99f3b0e6afdff847299cfa18c8311a941050144b90037b097b6925beab8128b6 -size 1445579 +oid sha256:9f46cfa4705654e00c6ea9d719086404c647ba0cb11f135aff92d136d0c408e6 +size 1000523 diff --git a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png index 12ff7d7df388829b7f3161828d0d403390b13aba..9c7ae9dfd0b5bf248c22eecf4e9dbc2f1ffdabc1 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c0336efbe2bb0174f5d94acb31f9104537b68c315c692920b271efbac402b50a -size 1295041 +oid sha256:923fd338eb22c6d6dd42b53fa1dea9dc6d59d306810050061eff34598716f377 +size 1682137 diff --git a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png index 1c4305741d72699cd8b54feb054597d43bebee09..b47c71491e093abd99feb36988fe7e301a82d3d4 100644 --- a/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png +++ b/images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:076376da248c657e5e7db87f99153b48dec8ff185c257d6afa22a6b88826a49a -size 1448482 +oid sha256:cc840ce193c4f83b93ae6c7bd25581f176852aeb4214206816879635fc7f9a93 +size 1578392 diff --git 
a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png index 550d0f1f9585eace9523eebcb15002136dbb2530..669c529ee9945c667d4e2ce91259ae5a53368d73 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ffeffe16ed892d91708f45c6411b4d4f37e7cfb2301c8fad63dd4afdb557ed7 -size 296389 +oid sha256:6744ed171312eb2406f96e51f8f54f8246cb52ffe54eca0abf8d666ab00eaa1a +size 823105 diff --git a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png index 11ea388af3ff464f93f3b47456a6caf34faf71a2..72eeb3297cdf2dad3d0b23d4bbfe2b95dfb914de 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5fb62eb0e30d845c2a3d4e1495e2a9cef805cdcead083edd0733121642b51a5 -size 679521 +oid sha256:21c9a716794934a548cd399b5dfe826924ff90f0a61cbf9e3672b23dff9bf7aa +size 907853 diff --git a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png index ed1f0596cf78c28338d0a52f503c41d84b9a62ec..1056d2ebdd91407c7a2f8dea20c372a9823332fa 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b60947b5ec8bdd03a0ddf3cde986d3c264a6ef6d7177335f41d4462f8cb7e5 -size 161558 +oid sha256:cd23bce85cdf6b46a3a4653136365a3177f60bc192771b1abc10ae9291605819 +size 161624 diff --git a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png index ed1f0596cf78c28338d0a52f503c41d84b9a62ec..0f9192332c0af7fb5c7a96fd5fd3ebb06166e5ab 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b6b60947b5ec8bdd03a0ddf3cde986d3c264a6ef6d7177335f41d4462f8cb7e5 -size 161558 +oid sha256:c2c180bc675c01d4318e4f5142568db0dbc4b94fe011328de99ee24e740a3b9d +size 191675 diff --git a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png index dc1d6a75365fac5c32be290a6bfeadc5e1385047..15dea15915764f072585e41de1ceb6f8abd94974 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a6c3e9d639c5ba5ba0319bcc6ee282276e7174eb85cc8a1b967ac4678c58f06 -size 577275 +oid sha256:bf585f13454c097e0c28631a6da6e6067bee24edb76064ddff26e70de6f5820b +size 924192 diff --git 
a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png index 10136607062c5f58872aa7c53759a1d04e1a927b..aa04ed7ff8cf01bafe246f620edebd2ca7ec91a9 100644 --- a/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png +++ b/images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2fc3da97e5e9c9a410fcfbd1a782b846c692ab5290eb892ac575f213fdee9d28 -size 555614 +oid sha256:64dd4363371ca78ca8a8e07498d58cad7fe4a3e07f5d983de101ebf08a2ed6ee +size 579987 diff --git a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png index b05aa6656bc7964652e9e4d1df20ff58e2dc4bdd..1dd4f1877c0300486d917814f47a07189245f3a7 100644 --- a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png +++ b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:765af98686b745dd55b08298a3132bd019f3ec439f07474f36ffb32d8283ee7e -size 1516834 +oid sha256:646f436b321dbc23f29910178f80ba3def3c6b59eaf44abf84a4f14de5133307 +size 1633368 diff --git a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png index 4b1adc033ccc6483ad5f0a9ecafcf5a141f5d4ff..16f9a693a6881e242d141ef601adee1c3eed12bb 100644 --- a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png +++ b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a66b1c5cf5199243bb0fbf08f51dff0655fce20981b203e0710f2cb5139b74ad -size 1093979 +oid sha256:a91fe8d3c54ca12f0106408207c20c4f7af12fd2826cb1b9586bf20b2315767a +size 1684266 diff --git a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png index 6bce96c5d9df11a83793388d492aec032b4b7b91..da856b80c99f3541d05ba5c89a71654265aabed4 100644 --- a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png +++ b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e7db2b2f66997b2397d8684d937e2b533e5a6693b83c8198e71ce2095ba78b1 -size 1123402 +oid sha256:1c19613df2495775123d5c527accc070b9c49dd9d5e2afd3157a7da3a4d56220 +size 1116333 diff --git a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png index 5758e4b50cd819b86b12d162c3f6ccf1203e7b79..c0b3b39908e92ade7176e885e7dc3bc75e8fba76 100644 --- a/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png +++ b/images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42177ef462b7123580a1884dca9dc927810e2300f50e087925af2212186a67ca -size 1394206 +oid sha256:6d4958c3d710a22dfc5f2e43c4af0e9489c5b5d696954d9f71ec8b995adf0764 +size 1498746 diff --git 
a/images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png b/images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png index 0fd157d2841245e34ed560e5c6aaa0d4691e5d50..8dd0bc64f958fdf1b83c38e745d01c8bc0b92ca4 100644 --- a/images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png +++ b/images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:95ca3ffbc619d7137d4bbe9bcd1998ca2c73f1e1333081716278b393527865b9 -size 2226519 +oid sha256:a799e13517bead63458aa73087ac2559896e4e694a925ab4199463fc39c7176d +size 1082019 diff --git a/images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png b/images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png index d28cb5c87aa9fff05ca1e2495eb81d86b9f1aa33..7990d425e5bf0b791c6032e1118c081c41db3847 100644 --- a/images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png +++ b/images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cf9059b28cdd1e9efeee4176d146efd907290140445bab7d6fe510a0eefd5d64 -size 1841438 +oid sha256:9966f273b114af7c74569fa56cb8a9d4bca2c6100e7248a5b574e3c0e7bbfb69 +size 1603479 diff --git a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png index 0303995704d71b52f9cdf3ca73ce1de4ac029aea..7f1767f85b4d6b23846ebbb70d9ec62faaf6c214 100644 --- a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png +++ b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:48e5b17c47653844fdb899d189c6a6fe9a541f8cadd85a9cbda3d23b509d5e6e -size 475383 +oid sha256:d85b59a4a1070493701ba2213e1773ca18f8c76b8ca8f8ad444d71cbfc9fc6f1 +size 588156 diff --git a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png index 5de38655ed6e5201c1d825c0d6932425f452a1fa..d68e64d9b0714ef509b5ecb24c6ecadaf500498a 100644 --- a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png +++ b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0aa18070f306776af8aeed28a794747785537c8b30458a45ad59b6595db7d3d2 -size 290665 +oid sha256:affcf8e7860ba4c9f0fad957cb778113c4622d24b6fbe2b42071a1230d95634a +size 347573 diff --git a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png index 3fe03906ea80858eec865122246bcc009819d9fa..9c94dd26ef6685f5fed6ad0a2bfe68772043a560 100644 --- a/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png +++ b/images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:03025cd298ba1abdbbd6363f5a36104dae5cb00ff365b639ae6ca838b560f826 -size 288915 +oid sha256:ebe6f0f9f103bbb81a09159c8c89bc8313c64ec38e7a709c3aa180ecd462c45b +size 346173 diff --git 
a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png index 5e0258cc8e508fbdf22de09c9f3b0a535cbf485e..a7c8997f2a34430721c18434d68738211b9de51e 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fb78a5a879f8b0fbd5406e36930979ec57d7b89804b96e736fe4cb85cd214fb7 -size 431052 +oid sha256:9d8b7fab4f68f68c47765b8da381398060ae6236a6938d925cbcb634108bb6d2 +size 829622 diff --git a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png index 342c8cd7116987def0a2b0306285cdcd1cd06a8a..cd0312310fbffbd465763642dd0ce127766dfa23 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:948aa91ea7fa128618240908c22ed65e80709d5146a1b488dfb54f59c457ecb2 -size 1234757 +oid sha256:08a850db9ccc9d81570f8efc1d1f2dd91f6523273cb87eb75705595213852954 +size 1474515 diff --git a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png index 4ae9590bc6adefac635eb49ff7df88b25eda14bf..d850da6550fabf65c8bfdf41f803908120d63380 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7257c071195c22469656794eb9f6cfcb073767c968efe4d4138185a1560340cf -size 393835 +oid sha256:cadd763f0fe3d2a897b0b047594c439c49d164bd8d2b96aeec3d5d50bb9bef52 +size 1028838 diff --git a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png index 8b728b1167f064b22d75e8152f5e4e78913583da..66af7adf3f929aeb399437d934856b30bbe1064b 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:dcf540755a1a5306d2c37b6526b385fb771571666d7df285332f41a79f38c867 -size 755297 +oid sha256:a4bf6aadefb49f47039c175249ea2569de1f4cae1dcaa52fc4709265b53533c3 +size 1514827 diff --git a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png index 5c862172b38cbab547a669ffde88dbd035b01fa6..b67c6c294d7e16cd4aa6ed5fce1c8b60b151e8f8 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0cdd43f1fd8c2ba18f19ff467a92256360943aebba0876ed20ee2b6e70a3801 -size 384394 +oid sha256:da06147e499fa7a68f545a976cac1d994ac24cd072d891fc7d7b946df5ab4b35 +size 574037 diff --git 
a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png index 1307fe7a8d311254f6c597fd9a2259f614b09b54..0484329e1d40ed7d69f7c8bf71c8f3235fd322eb 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb032cd31ece9b253527ccd876bb20aec56f5a20cf97a6f06bce2a927b801bbb -size 1234341 +oid sha256:738b24d5c123f27980c11b609bf3ed4dff33cb763d9313aeaa536aab3b947010 +size 1436292 diff --git a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png index c4ed8b828fd2afd942cb04def86689d987a8b4f1..e7acdd07ab3233b9d52e667da92f732138fbd257 100644 --- a/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png +++ b/images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:747f2db2e1e838e0601ddfdbfd39b2cf9421d9695e49fc101e78ea247f79386e -size 379151 +oid sha256:b142c4ec17954c0a9836e31969f07608f904807eab611095605a5bd8fdb90d99 +size 334145 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png index 9d20b4ffd1ee3e12aefe93639c95fe4ab918c443..b787edca2e07bc1dcd5644add5a0ed88062346e3 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b8b54663ed623d5fdcf5420f1c0fc852a0b7e2ac06e0350379b9deca615151dd -size 1531852 +oid sha256:83cccb2f25696a9ea77bebd5018d40c4271a2e19dfeed5c6b36776cfbe227837 +size 1503943 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png index ea635cedcbacb5aec601f98a4d60d93415ccd65b..55eba00c8f08d551077b439a5022ffb08aac7837 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffe4dcbda77d8bbd9684f9639dfb3b946d4ccd10c92263bf360d618d4a1343f7 -size 1571132 +oid sha256:49a2bc90c441f4f8897f22f5f2e14187c5421e55b522ef7925bedd8cc7dea08b +size 2304178 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png index cf3f940f69b043e20f3c918a68fd5955f8516523..4c639fb3e01232a0fd7942381c618f823eb285ce 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a3f34f7e1bf289a2c9b96a68d8f05b3b8a382f9467cb81d5b456ec3a166405ee -size 857249 +oid sha256:b6030d0eb892dce2eab5b2afe306a57688b65590afed8c918dd1d9479822cdfd +size 1424742 diff --git 
a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png index caf19e6ca19dbb23f419e6b499bc03b0b53cca92..252d97385e756ba04104c63662930304611019b9 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:32089c5038e7307d228d5cb5cd4dccaa48f9c2c10e79d464d0abbb5869203b2d -size 1463389 +oid sha256:703b9042e5b6a8a4b5bb9b576145c8aabaccb644bde382f3ccbbf660558db8f0 +size 1587806 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png index 1506f652af0362cbad34d58c71e1508ce52df3c5..e57d788dceda55f32b9ca0f6a02812cc885b58d6 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae6354aabc3def46cbc7b5c846ca7f29e69bd75ac77ba4728d2f65cff55f3902 -size 1575712 +oid sha256:ae152c3288067e0994493b6dc6ee5b3d1f5286a5bb6ad2e3057c421237e27915 +size 1544182 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png index d0c8e6277bf60aea0c0e13bc055025b7c03700a8..8e8bb3650cbca4c8717832f734727a0c7640beea 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b86392806e76ddebfb205e53015d984d1a7fd58a855ae59cb1c725272fefd8cd -size 1482408 +oid sha256:80632a13ef7fd2aa00fcfe2e88b593cf110f04e796f34263ce33e0b8fd8728e9 +size 1238709 diff --git a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png index 8a7d87f5336436770c7c4de855a6a6b75e6b8532..1e654743d22a3870c5985c99db1be78c362d8f46 100644 --- a/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png +++ b/images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5ee46b1e30985f305e8a07818b0a47a2c0ed46f1b5088de99226a94af096bd7 -size 1273972 +oid sha256:9dd49eb05e7d15fb3e134c751dbe04304876e96fb59f98a94d67a99750f4c2f1 +size 1357236 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png index 21c7277173b38b6b41ebaea939e564eda02aa462..902cd0960dcadc317bd18bd2628812cc15984a9e 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:73e54a1f6c99a1910c92c5aa2afe2bb43165018b54b58ba7f56c1c86481b3e2b -size 430353 +oid sha256:1ddd9583e15238d304570a1c1c0ee89baf9f694ac249f28bb63e06f3b440f90a +size 441274 diff --git 
a/images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png index 682fac600221710d223bbb93bb7b36cb81a775ac..4995b284eb199631f221398916d8303dcb252de3 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b9249323606a5f2ad6bf8be756af4f70ebf06cc87dbf4465382a1dd79ae25803 -size 501135 +oid sha256:1b3a64085470b890651851517a20fae750ea434e9e5dc100c43b0168d737dcae +size 457754 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png index 65ddde7c69cc8f4ec5ed2f6e06eedbe3b8ef18fc..1e889b1302d15b2ae05645ae6f2e6a81a1662efc 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7a550ac76f51351fcea8058499c37f99593d784d51bb15d347ce5418f33390f2 -size 223253 +oid sha256:3c85d8d6c5b3912d1bf04a65580bcf369fcc42ffeac599ba51ddc872016dc8dc +size 225713 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png index f567568cd80509676436c61e5e96c33b36d68822..44523e7c1ac128e1265bff87370787dcffc84f7e 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e02c725e1c31877430e550d78c5fd14d9b47a7a2c274e5e4e55b7692e645299 -size 866268 +oid sha256:584b6de11e038f0d6f971edbd3d4f08ea9b85d85dda15843c915608eb3bc6b77 +size 533669 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png index 61e7a6fa757cd5f935a58415f107693f68082acf..bec846a545c0ee708eda5f822a28cf2a895e14f4 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:825f7bd180b041720024fab60b3d49a88cf6e217498d1bf73e956984053cc748 -size 949036 +oid sha256:9a5a2d621c793f9043449879407dc78f935c7aa63d44c439503fbb408788b228 +size 463249 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png index 1768fee13f1df23ca104e4dfc80880ee89cba054..978b64e53eb86ba518e1bdf6abdd227acddd8078 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ce895a3a77574be308e3d7a3f6ff65fbcaa973ebcf3194bf3c9112911be0ef1 -size 162607 +oid sha256:3a4dff880d27a344f22d0c2cc7dd83884ff28e8020b079813422f79456274a70 +size 159655 diff --git 
a/images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png index 3b35c7cb603f713c5d58aba3264a803566102528..ed164f0f6e4cc75857df25eb3b6066e042708936 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5effec584713549a03448e91317e01eceb9c2882838668932f0666e70bf436c4 -size 160453 +oid sha256:44565b3473469bf74152c74b7642521da21428cb514e0868c27f1990a517f492 +size 158716 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png index 0fe51018b1c3927fe74ef80cb5ad524c70553bd9..5c31a5fd1717fb2f09d79c7f84f0a6806096236b 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2d486a5606c9c45068d5defab66631523add36f28787cdde07c43da7cdfec4c -size 542293 +oid sha256:8f45ef42554d1f949a04718182dcd80c8ca329764c9b7717f1746912ae49c16f +size 396553 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png index d60a8db00e2eca712bb391c04592f826c08489bd..030adca337fd38722b61baead69f5df73c37a859 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a4db9b22fcb370619d9ac8c2af716d11edf11a383a95cacfac6bb302ceb8675 -size 217622 +oid sha256:15f1e776b431283ca23d428fa1be55ce31c769775cd948bc5b93669d1210936a +size 164875 diff --git a/images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png b/images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png index 17fa41839742db560f6d303994a1e5b4d20bc2fe..39763fd72094f8408f907f74948bc7011204bfcf 100644 --- a/images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png +++ b/images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:84a04a4c59d85db4ad35eed3553bff7e4344fc5da5bb289456bc364f8c2146ea -size 220313 +oid sha256:ed247e2c2eb896c9c1b3ac3eafadd7d8aef2af078344ae6687a9d51075ac746a +size 232106 diff --git a/images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png b/images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png index 17d754712eadd983c838da2a7a98a901a42536e8..64b55036d020f813c6bd9cf2cfd6ed75ff70ec55 100644 --- a/images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png +++ b/images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb09712727285d1105e6b325a669e1fbec85d22b3c2bca0c2f936dc8a33b91b1 -size 1026340 +oid sha256:7d54723cb28283356d188dd146c2bcc3679fc8ebca18320f7650471a357c932b +size 1385081 diff --git 
a/images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png b/images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png index d386e447e3a6a2437b8b6f196950686f0e961398..3668aac427250b1b2e51418185ae6385664c6fc2 100644 --- a/images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png +++ b/images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3f8e41dbc08a976be37270b189bc4c319ca71f13f70e509178bd71f76e2c7f63 -size 965599 +oid sha256:6e666b612585f4998d7c2263769f09fe3274c4ef9e068741e392c4c7608e5e96 +size 991002 diff --git a/images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png b/images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png index db5f43ee9f10dd96f483f2b42dea94dc4a5122c5..52e77b1f8f2fa7746aca2a8aa99da45b0e84ba90 100644 --- a/images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png +++ b/images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:106b7bba5b7dd9ea1319590dd36dd7ba4cf37c81201c1523bea96b23104c632a -size 831150 +oid sha256:bd6cc59e1ffbbb002a57d5c014893d3d05322b06f45d19b5488964adab5eb8a4 +size 822065 diff --git a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png index 48ed0b0755d6871569bf72fa8a4040a42df28881..602e2583d89ced5e85846161118b27d10d4538f6 100644 --- a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png +++ b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c45c76888f75689f7881132c712fe1b36fca1913cfbf01d911d8e320c64ed27d -size 1431457 +oid sha256:b6bc8989b3038c6465635acaa42b1ac922020d5d3612023fb7e5a4a25c3bd7e0 +size 1500265 diff --git a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png index aa9ed3e0cb50e123d6d928382f55ac3ef2fc1f3f..0fea697c9c85bce6a336b24a14347919607dfc60 100644 --- a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png +++ b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a20ba331ff1524d679b6dd1a73adae58aa9e4f7195122a95d2f1d38b20daf09b -size 830752 +oid sha256:d7973c5c3ac66b5c030d4f11c36ba1c5b2fc1efd001beb926c1a2bc578652191 +size 1059503 diff --git a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png index 21e146bea0e60a4eb6207baa374e575e6f8dd545..a1acddd55a146efcd8f3bf1159989c45e1271feb 100644 --- a/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png +++ b/images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1255351a9af710fef885a87cce6d39bf322320152b1914cd348d514cc9b94687 -size 875252 +oid sha256:3b26d2952b5fa2b25d2d95431d8d5e61cad396f96d6b7e1f0a61ff70a73eb9b8 +size 939681 diff --git 
a/images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png b/images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png index 23dc9f528bede8f0dd37e992882b174fbb69a775..939a6074d0bbd9d03b6c70f8126a2ab9f0899b13 100644 --- a/images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png +++ b/images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42b89c97ae014208989b48e94dae6c7e696f7e40dd52b5e6b7d713ea168ff05e -size 1054918 +oid sha256:a61a4454fd33f8f2a0fde7a567512d6312194907728caa340792275fee36657e +size 739157 diff --git a/images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png b/images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png index 0bdd3ae084d693ed006106811460f0f381ce9b55..27e84e9f23984e885163878f30d4ea430f2fcba4 100644 --- a/images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png +++ b/images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00af320a7511898a12d86e246f6d5fb220011b2af59977ab1a0e4976e4216ebc -size 1055636 +oid sha256:84f5859424df7dafb595f60228241ab800d4c8f0e4939213a30c304400606314 +size 782559 diff --git a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png index 26ba0a77b7ed299a90ed5f89bacdd181eb244f07..4c034859664f2de3d8c81c070a5c8434b6acb9f6 100644 --- a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png +++ b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1b115ef18ed409e4a3238d2d00477488ff5593516a3c017ab7614ebe92e72e7 -size 864298 +oid sha256:cf05561a989d104b8ed32c6fa64c8f59b595104f03b286535240994682ddde34 +size 487867 diff --git a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png index 176a4d3c099ca2f89f3685b6b1c1499e640b72f1..a583cc5ec1c4e67b1948f3e4f1d1c18e97e2f46f 100644 --- a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png +++ b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:96e3fc04d038840bca27f47bfb814d9163f5950617d6dd41a83fee2d8c0870a8 -size 280991 +oid sha256:402eda38bb3b5a01e6416d866b3ff474b4d442334b6e934fa207b46bd3b1345a +size 238507 diff --git a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png index 432fd98f7a882657456495d74d8781a3c8698512..fbe29844b01d5d67b623144e135bdc2720c2cee9 100644 --- a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png +++ b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a9a8425f7b1a6a42a2fa0a1402829dea5c00c76e5b5d158b7e4ece2edf611005 -size 4685706 +oid sha256:9c002fd900bf0ca59d5dc19695b52bb16d019aa641c185fe9b884d263f8ba56d +size 2175256 diff --git 
a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png index 445143d7456985d7b8636f10e0585cd9c9435c76..60c48f24e001ce361fd233aced51f9665fb11ac2 100644 --- a/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png +++ b/images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5eee0a5fa259fb0fe27c34f592685b37e6ba5b64239b8d9307ed1682a835d26 -size 280706 +oid sha256:3821a01a14e15d72a57441caa176abcad5cbe8f60694fd414346229f144eb2d0 +size 471398 diff --git a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png index 0a804090b40ec9f2faa0c08d8ec9642368e7e4e7..f119095f8e152c1b2fd6b7d1a7249a67d0d2dcb1 100644 --- a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png +++ b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de9023646ed7a25304903d37f445e7878a98a1ad0ee95b42c041b1e72d2556d3 -size 1108867 +oid sha256:a1cf9f80a990ca3244d21a4311b0ec1062ed74e05c7c57e4be5cd31b209c2169 +size 1982222 diff --git a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png index 44d3407e469687d3b792d51710e1422fdcd0137d..2cb2e1ea68121b9e71c97602e939d2b05618514c 100644 --- a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png +++ b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18a37453c8a4211713f0a039fe55cbb98431b2695c92d300842b2fb325528d29 -size 1269824 +oid sha256:604d47012f12617baf20ffb27c6fe6940af2a552d44dfd4c37d23c527e08387b +size 856158 diff --git a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png index 143ac8a75bf3e7ecd7fe7cbe4ab2e6531d7bdb23..ba847ae984660bb4f8c711c4751032dda0694053 100644 --- a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png +++ b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3543a80f880b62a0ca676255e78d69b7381529da0917ef240649009fa95b7c6 -size 840891 +oid sha256:99146164c8d7646d302d0c38902dff547475328e142c2a75a28cc8316f2d4ee7 +size 1091923 diff --git a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png index 0101d0990346316c2a552cf3236286d0cb437dea..30de708715cb3e0d1f6f07a33a398c493202246d 100644 --- a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png +++ b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f42f6a4990db52e0e028f0074ea3701ac939412834593609ca0b42856706dbfe -size 1318826 +oid sha256:80137a4ef5101af37768250822b8b9c0f6e4cbcb7fd313fa039cda30662449e0 +size 1104373 diff --git 
a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png index ec327f6381a48bb80079371318191546729c3967..e5bdfeea26f828421b4fc08a0932da92eb50b815 100644 --- a/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png +++ b/images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4552eee27ef7b45deb9f6611763c1839f459b64fc029f1d7e45442c6331fe445 -size 840329 +oid sha256:e440fd45003fe248861484e5d3e0d9c5e8de0e14d882a38a0a0fd5821ec88dc5 +size 1081576 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png index b6c3fd7d3b7ce2e77a636ff1740c433034dd3444..25930658b10c752f1602fb572050705493a2723a 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4d8e75acbf1ca20b035235caf8a513c7aeb53e0f5fbda1eeaf7298900556a15 -size 775157 +oid sha256:dcb2c48e116c3f3ab0f1b8cc643054ba7e44144744b05cd0d608304ca78ff170 +size 908300 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png index ae12cb22e007693ff943ff5dc514dfadfd926725..46b19b9adae130004265852d9dea498f46e922d2 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ede7b5832cf488020f6b88e8857bfeff24543b4bf9de63625ff545ddd0855d8 -size 325506 +oid sha256:353427d5e88c912abcdab7445e71059c6cb7cd40f7a0f57c565b1f35df298fba +size 325139 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png index 621d06d72d8171ab69ed6a934a022ef66f156680..7bf76701cf85db0fc5dc07a186edd80557b20439 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e30d1b02292e2ed8b071c87474a4786afd428889017fc2413591dbb6ab5a5b33 -size 307589 +oid sha256:7c809f222369ff04ea0ed14de07e97064aa9359cd167f2f41b724f08ff4f7181 +size 324248 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png index 83e84de558aa503ba067c365b2f603c984516a0f..6a55ed48250998273afce20d5457362532e977e0 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb46e68b7a065acddb857106559daf900f3a9dd3bc34bdc00c9da16fd5d4f5e7 -size 1124518 +oid sha256:a7be893591eb2cf286a072edb43cb249530eb5c44621c923405132ef8ca8aed6 +size 1115922 diff --git 
a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png index 7dae59960850db66d1c308b451681b6dbcf4c877..236bde1d178987dcd7f8396beb0eaf41a6f7e3ab 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:757e084773726dbb017054622d65c5ea46821bae5ac8b15b39b5a262fba519be -size 1425037 +oid sha256:272301febea20c98932b0dc02d249e7c255617748f5a202b14d21c25cacc89b4 +size 712010 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png index e9b88003a2afcd8deeab9db264c61a69187bbce1..05aadfc866d427abae2b5529794a66ccd254c38f 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:801abae08a9e6ddaa21494963deaf43f19b495abed319ff4224b9e6580f17169 -size 775415 +oid sha256:140302289b5169850b329bae04ac9c8517557048ac67e86b44c7350d6690e698 +size 921374 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png index 4009bc3da93d7a85bc6cc4590a2d8c8068f722b5..a8dfdcf59d0d00532c6c5b0e2025480a0d901fc8 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3a4df7344f6e35122029efcc232ec26d5482477f397cf2247f1618ca0b8da964 -size 324108 +oid sha256:77e0697bc478d7f5d29f31e8e31f0da5e427ae0131d3d294594d558d4393a44a +size 324784 diff --git a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png index fc1d453a52fff4ce98dd0ada914887b96e17cf63..207f2a5523db20c77f27576f8b85734abefac8d2 100644 --- a/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png +++ b/images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:706eb1ea8ad7d809e93c6f0acaafaca03821dfebe267bca6f3803e9bd2a63357 -size 768724 +oid sha256:abc70708bae05a64d824e998ae9a22dd450c53c3c2f38ac00c82e365f5eaf12e +size 796435 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png index c81e5c884ffa8f10400e3e805de52d71a331dca6..554d72580f8381730b67c35718c551db4c3e2cbb 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cf125f8f0a0000c9bad0d85ffcc08d2565775d961c3c1915dbd7694254e741e -size 704964 +oid sha256:6b7dd56dd4cebcbc92ed3b2ecf76880e855a07dc273fcef9792f58bd53f37a20 +size 1467536 diff --git 
a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png index a0a707c2301bcd30a953cc7c139e78bcd33d2d51..21eb70f8b1b93176192289dac10972300db0ac52 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80107434d630377c33db632a69b2243e0dfbf706cffb84d55f08afb5983f7903 -size 811022 +oid sha256:07ec0d98c0654f4191b8aa798cc0d22401c0d6d5f38b4b43b184a543ad909ae0 +size 1008146 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png index 9b3ce9da213d39fe6b8a7fa58280120ae8256119..cbf4d7326e43f05d5155b0652c6d78f8841ddf15 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0d0bab37c80cd3e54c4fd508a7c7e89865275480b762d02bfc99a274de1e6a57 -size 738390 +oid sha256:7544fd5a1b4f06438c59b1f809b04cb3a85f80de6d465976b46942c221a247c9 +size 1495912 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png index e877e92fc2077835679fa6f737dbb0c8bcf93064..9eedacaf0826cfda155321e53d127059fb895ea1 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9c71f851629d0d2be6f29a5a29c1247766b87d91aa4712f1a079879d4d672d86 -size 301848 +oid sha256:240b10b78c91f794233e9b7def75c3ffdf83f78bcd00468ab63de87a5ea0819f +size 274641 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png index 544a8f5f5503f000da351a8e2c7a1b40b551db8e..0165f5005f51bd50e34b491cbd93972bd5e306fd 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8c34798f480a050ad31f743045670c54fd298513ebfecf09777b1e012b4372a7 -size 235264 +oid sha256:09caad69194eca6c1c762b7e8cb8b534ccf08a7750e15e8932ca9d4b893e5220 +size 202673 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png index 8f7feb670abfeb035265999b798bd50c138425cf..2048e26d8df1840c0b26a5b3281a51ec671f4b2f 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b45cb549a8ea0a6ed7bae04f6ef798aea4769af544f2ae81f2346b2af86bb91 -size 2103932 +oid sha256:99e2f17639fa8cd6e8a87f40db27339020f9fd8f71bbf9524129b77846cc658a +size 1103178 diff --git 
a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png index 1359cd6df788023b98afd2df84bdc30e884f2289..215c3c1b5ced3c037b570200fe72072e7dfabfe7 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4b4f163b8be05905da8c583913a40cc69e460c87abf7445cb5266463ce0c23fd -size 378624 +oid sha256:29866e4876719370d57540213fc484ef91d0ad5c0a17bb7cb8d6171064c0ee79 +size 775570 diff --git a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png index 6ff5ed24f8bba258d380da2df75b41f898659bd2..1e03911dcb473bb4abe9013cf888cad3d114bdb1 100644 --- a/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png +++ b/images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:834029fc4506f2c72d7e60b406a6f242acdb2dd537fb4f2923140cb29711a206 -size 319003 +oid sha256:4c85d02b08b69798f10df2c14d724575c49b647163044004cfa942a53485fef6 +size 410008 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png index e6557b39511d72284b3965b5f7506b734c733285..083721c54c97e453f85af7cc477b4e5532c9559d 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f00baf03dc653d1dbd0f3d9bf210d9571e44c79cecbda341edf4db2ad03908e -size 1986811 +oid sha256:b7434361123417f48eaee1cda79137d5f6f4e66afd1b7e0225ea3b11d8c7a409 +size 2148297 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png index 35d205290a0c55ba143bcd6b467cf3378a7c9e7e..138554e42468c1ac3e71e17d8a65e3034755b422 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf9ac7668287967fb2229063378af6631bbcbdcef8233ea8dc1dc70e170fde91 -size 3041933 +oid sha256:cd93e9e4f009fe3a330f2f133b750f641077141bb1a3cc8e32125ceaabd4794e +size 1613704 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png index 961c34155c22e0932be963166d4902a4d640d0bc..89109d4c44ae387e36bce86ae19f52171b9d8bdb 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de1df10be61bd79c2e270ddf981caa91466f969b13e00218865dfe74119a41fa -size 2681065 +oid sha256:426cb5da51da61a7ff08c06ae734ba91ebc9282c80c2f6e654cfcd586f870923 +size 2044498 diff --git 
a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png index 37b8d89c4f23bfeaede55758664f9e10dbd578d9..0b515dce705490884febe3e082d6aa8cb9e46c42 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f00b9d9f2790f2d9bba4b85f8d8ffa67b566fc7fb323183ffb2607582c63ff21 -size 4815750 +oid sha256:e0eb46e6f2647acd9db82bef2b8231649ccced6ed445ad3c33e7863883b47b7e +size 1799517 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png index 8322ef79be9171aa83bd4cf0c4929173dd5cb349..82ee9e735c37fa221b4a1b190992482b4a0ed394 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5e6d15855b75db34d323307154c2f946cef63248fd0817ff7d28e51436eec3ec -size 1927570 +oid sha256:3decf15d0e0bb2c5dfa7bd376ecde95e8decae15a773a20b51d36c491668f4ab +size 1918235 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png index c9ab1a20c3b625aaafe62d933bad5cab64c874c5..308500346a6dbf85e73f19fe61d8f08b93f0fb7c 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:299ba009995214d88f7d64f4808e72f0be2616de6c9e26cdd7fd6e13e3120134 -size 959650 +oid sha256:29f7ca17911727c05e8080c5f370854c997e0c4c86a9894c0aa618acab83bd6e +size 1626393 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png index 79eb1446bd16b8d99d671bc549545aa01ab13d5f..c2e39f13595ab0ec43c617e8076b6ecd36641d4c 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3de9008ac563525d8af501ad88897a3e33d6820ce8a60775da86b95f8e05592 -size 4804316 +oid sha256:b53bc37db42f179b86196aba1a6b060c8fefe74b577b443c948cda0acd60008a +size 1540617 diff --git a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png index f801cfa7ce5a48a52083ea3b5d15c307c8a90fb6..cf9f968123c07fa65e9e33ab4028118eeb38f30d 100644 --- a/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png +++ b/images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d0235808b6b00c42b24449af84d11f763c60c733fab8a5c8651f95d989f1090 -size 3081161 +oid sha256:6f53bfadd914325c68cb810ac7a0dfaa1336e6277c70f5cd5685ae6a19ad1e7a +size 1217398 diff --git 
a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png index 889537916582c0ec4fb6ba54d7814019ab13be3d..a7ac25ebb553d04e2a6420b68d6203f3e16021d2 100644 --- a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png +++ b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bcdb67a4028b32a0283ac634bdf1d8c1a10438782428d09e138f6dfeacf9dddc -size 632679 +oid sha256:da9a58d4976d159e0669b0452ce4991603224fd5c04b56b1af1470490e3df154 +size 503844 diff --git a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png index 0809a240e5e38e32cb48bbe20e6368b2bca99831..20e4300ac56e0661ac7f1820ae6f325386cb2115 100644 --- a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png +++ b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f73dca6dc8d03506780e861c16c5b275e887420e32d2291fdd52f16ba2d927a5 -size 1588927 +oid sha256:14bc4759e910f7186481a584e5043629b16bbfca5e84809e6e7cb8bf456d2769 +size 1426559 diff --git a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png index ebd4180d33be8d44d01c8126116f2712dec2595e..c7ad63811b840e6b578fbdd736f6ba698d60db6c 100644 --- a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png +++ b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b85c02babcf7e6924a7c0eded8793741c54bd4354792f1a68d87a7d536bd295 -size 1452654 +oid sha256:a772f6928ef34cb76e0c4710f516bc7ecbc0d17584785b0137ca13daf6ceafd6 +size 1301544 diff --git a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png index f724040f77a4aa4a74bdfa2df40662e5bb930320..9656ea4260883e7fdde805e8d8e8cf03f9cb9d21 100644 --- a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png +++ b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fb6a2d009204e54670782e8c5c66b881329b83cbd483634835d7c16e69527c3 -size 1574832 +oid sha256:e80cf2629c786a9902c26092d689ae51226a0439de6f4e1e6201db7edd4ba1af +size 1360129 diff --git a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png index 8c706071bdcc8cd1f1485651a6d60944b52161b1..809287c508bbb3837d1c2dcf402b1de1d57cbad1 100644 --- a/images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png +++ b/images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d65a460c9bbcd28fb583457a81d04e2d901d9d7e212bc41a01b60b76d37d789 -size 2389269 +oid sha256:212b3e7c5f48cc8f755b3ea76ba6a68cbc5cf769531c275078d1d80f27c9cbeb +size 1807662 diff --git 
a/images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png b/images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png index f4ac8257ae2c8933eee693b3f6446d71732a6d33..5c9d72ba908c64a59f54cd8d3a0ee61c5d388d1f 100644 --- a/images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png +++ b/images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2060897b3393f38274bdee4dad9120ccbe557db2dff200b574a22abb97efafac -size 1035718 +oid sha256:b99b78b793842b25ca0f2e49b3660900ca06194c09ab7e34a878f95bba2561d1 +size 573024 diff --git a/images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png b/images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png index fccacb4d0d55de1314b2aa35aaa85afe23f0fc5e..d3c1fa9d09dea7d09d2cad6fa2e3a1b93e21172e 100644 --- a/images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png +++ b/images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:222ee3b9569c88b09005d8289aa648f61b8216ffab99b3409daab03259ca83d1 -size 885117 +oid sha256:d3c4322124c04ff0ccacb790b4f04afa63e502138ac19bc0739010b0e09f6f5a +size 1356695 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png index d095aacee5834ad6843d2bfbd2e2e0d2d320625a..c53e7391708e18465bd3e65bd5ccffa1c4c8b8e0 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6dfc94b88fd48285c7b1886cc2621f412bc15fa9bad53e5b977316fec1fcb933 -size 2023354 +oid sha256:78b6f68a20470040e13d6c2c7de9270365d58b76ef88f3aa0f356ddbfd4619f5 +size 2510624 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png index 637ade6c6fc1c0ecddfd5f8f6558750a6e377609..b8a77432f05495a5c4eea6a3840915f2d6559d25 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:937df5015830c9685588ebeb48b2adccc9e1bf3e8e3d4956d5db1fab9587f4ed -size 1089928 +oid sha256:0db2538999dd8ea2e32843335c2b0e649cb7b918dfd52e69a833234ab48ca682 +size 1598293 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png index 2fe10fd7e12bc024db4db522cf5f45f1eded9f68..c41bf2abb4964d4b544c040e672d5bed469c5477 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1d3cd5c350dd19f7517431da53746d978125ca021a9e8bca24b98987b9bb6c1c -size 1942411 +oid sha256:b46b3bfe39d435f0e7e0f003d5be1d8da0d3ae09209780dba07f01c01945d0db +size 1033945 diff --git 
a/images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png index cd340f3094123e2c8774380dbbc4adf6dcd473e0..e6055251cf871d90fb077f6339810c686be424e3 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4f291d717737f9158551f361661e7253ab6e0b25e8ef60da32e092c3f1b87ef -size 1404170 +oid sha256:6dc0fe07624d8cc7260f686bd29e45e4acf6f676eb557aa982fa3cd531f44bc6 +size 1513393 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png index aea88dc2110c1e61e266ad48f53a8419df56626f..3944ac3b9f742cbb827d409f96020b7d53a71389 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c8c0f7795561627d8ae8409089d89ad8c69084c66e0cb4dcd9d0f8bbf7431dc -size 1562081 +oid sha256:19d1103d8d91af590b10c29a566f02d0fcafb407789c8e98f47608e2ffd4c54e +size 1302995 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png index 58c1c9cbb0ec66b1873863078c8e6503206a8b8f..b90590bca41ad169d0f16041d6e331caf64aa3e6 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:564bab16e2e961dc5a2a5d3df475c3416c41e50d8fe53e5ce4912fc8a7a76429 -size 1216602 +oid sha256:67f577de83beec5c0cb0d0a67cfddd3b77532c0267f0aa9487a18fa99f85d93e +size 1653139 diff --git a/images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png b/images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png index 083f7c5ace65d23955d9eab5c347dab6da6cca17..f8ea3241bdad10699051e732008cefbe897f755c 100644 --- a/images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png +++ b/images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6a128d90b1b9b668de36a5ca656c14484538389a28216ce7fa48f6679e4f72e4 -size 1552353 +oid sha256:949a34f98fc86b6d4c8f25593cda269064a2a94e23f02df69eb43e3655fda848 +size 1649439 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png b/images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png index c391f0000fb564937a43a0b52c2eac7864a0f364..3c6c18c3db6360d352808cdd7b5f12c43a774dbb 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4bd77ab65691259bbe53c8923644f390cbe39a3bd56a4517692dbed4e626ea6f -size 1044061 +oid sha256:90c75d314f76a53e968ca23fce4e1b533d3c8b872406bca758527c60ebf2a2b5 +size 1256093 diff --git 
a/images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png b/images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png index a7ab540af5b964e6c628c79ee05d09372737eed6..60a3acbc850bdfe8e07fc3b5c7780184dfe3deb6 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ac58e6be07529f249fdd7e6b978caeb2ca016cef69cf588f243fe9b1ccae0ba -size 701205 +oid sha256:c8811396d53c823f80aff26c41f4c82e432b61eaabf8ff5495c5dab10948c951 +size 505164 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png b/images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png index 6289d086ac32e6010a339012fbed0ab7e0a2985e..0b139adb8104fd25cded8f0ea850e750a80a295a 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:585bf9e1ae2be328b18e76d6c9cb23c404ecdba8e5691928db24244a0c00c2c1 -size 1148468 +oid sha256:312e124941c978fc1a5ceddf9ec3f332de408d0a0734f2a3bc870e3c2595ab2e +size 1806734 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png b/images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png index aacdddd9f39c39732930c96fa59ac73564ce24da..80ce73c72808842c570cba3b0316f6637e89d266 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:771c5fded8f8e9bda80c4dbd4e3b4146ddd4015b916d7a79dbb73c5d7bb950bf -size 1253402 +oid sha256:87a370ccca85912d103107c754e87b9d05e6f8a71fab023a5b292d3d5518b365 +size 1391752 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png b/images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png index fe357c6920e0399d32109fd91e5b845c21aa7d1f..32a7fd043811f733b23b17e16777c286739d29b4 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c71c3ebdfbae342763d046d22cb2801405c2b22e5ada5c11d36b46bc9535fded -size 757014 +oid sha256:35a4aa02e53f3f22a70ab1f2d323c8fef31394e9044fcb2d31bab99c4d30d9ed +size 1916343 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png b/images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png index 64cf4a91da4010336db135e2236edc5c6681ea57..0d073da53d19546bcc8a15bcba717ae39ded913e 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40a98050a3e41b84e89d3b790c37d5443dc365b648322e9412fed6d35f0eb2ff -size 2540698 +oid sha256:4fb95adfc10fd2ca3abb617f8f9f35b601dc4f0e494b2a5c8e5fb47d694be8c9 +size 2082983 diff --git 
a/images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png b/images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png index a424eea2b1aa7bd1bbf5d5369a83d202b845085e..306a7ef4ac51687a2d50a8d6de77ab27e4efa0ba 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01482d3f19f3621f95f9d4f83898c6519b2aa7eff8cbf1f526fd9341850cf9a0 -size 755921 +oid sha256:111d04aa4ac9dec38ae860c4357b6eb98a65af99c3438e18c01b88cfba5aec63 +size 1258000 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png b/images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png index bc50e6998c951c4af4239fe240c6f177e4247927..b5dfdfe396072e2543cdec041bb3aa05f49500ea 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:102bcb50e59e64f0cad0c2d316e2e7c58bf1d66565d22ca7d0249642a00cfbe5 -size 762407 +oid sha256:505856be96dca5fde9bea4afe5e449fb415e3526823cc55f24a4e05c96a0f759 +size 1393192 diff --git a/images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png b/images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png index 6ad609bbd911c772ba881d08fb5e6e1cd705fff4..4b649135662fc8d97f72fc0e25f529ea5ddc5452 100644 --- a/images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png +++ b/images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b74f81bbe1cf9588baab793462294e9a8174f5f11c7d6d355b837a0e049b3159 -size 972706 +oid sha256:07d44ee9b76928d49e8f8a093c98eaf3a2b87697009338b2c2ac6c8a16c928b6 +size 1301920 diff --git a/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png b/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png index 8ac38b815ce6f116ffb9d67b0ea9770964a6501d..81fa8dd5e08866cb56c805dea36a0b6f558193af 100644 --- a/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png +++ b/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ac23136d9381fd9579fb23a0c45a422cf14fdecced0d1cd0c7d269c493f89fc8 -size 503623 +oid sha256:074c3255209cccad849f572bf836cce4a95e8907d43c43ccaa92f9b27d32b94d +size 653043 diff --git a/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png b/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png index b27f29c36b9bf57bf4f19a7486d90c23d507e86d..200e40fdc13607e953c2a8199e921a2c3ddaee92 100644 --- a/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png +++ b/images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01a4d0fefb75e9432c63db8dce176fdecd067d8bbbb9f390ff88ae1f76b87f55 -size 1347246 +oid sha256:bcbdaeb0b7d72e806888fd64361cdfac7f76a39b6450efb41008d39c6f2fc985 +size 1345960 diff --git 
a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png index cbad87d95bbc61a77c80ae55dce922188d4c497e..2cda6c50a06258658ec52aa7eb3e749a61f06e5f 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0823a2ef353c42d85f1d35262042cc273a9b34ccb4cd30b7f6cd693ca8288e22 -size 912089 +oid sha256:cd5745db3ab0f160456e8a489f75bd8db291ec46283fc491df1126c961215cdd +size 719684 diff --git a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png index 6319bbce8470a068ff0500a19e3a6725c0c7a8ba..19f5fdebe53b9324802600cbfd90768b99462ab0 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7ab287605fc39630f15a9ee6f54e0a13a882bb44330b12bdaca4ceacf194ece -size 913668 +oid sha256:a798781cfc432508602fe0a76d76f0f422079374b77a78cbb92e2386966b5f3e +size 866736 diff --git a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png index ac3109caa0e7a7eb8590df1e5a0269d1689b747a..e06154c8b01ced84556f977c6d8d8b6ea6f6b13d 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:15f7059caef0f07558f45e82f2607ebf584c44be6e0071bed2f69b2e2b57c28c -size 1439073 +oid sha256:ef4de860689f69e2860300c6d8bb86ca56682940d4ac8e616d226175fc1da60d +size 1573289 diff --git a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png index bc5fffa02e76a7081b4d7dad604654b357ea7ad1..1ab8088044b6c7bf4b62b0610e77b4ed377f1db2 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4362435bebb5a8cef155ab34d155be12764e38b81c1cd1fca2d7b886d62841d6 -size 877378 +oid sha256:b9f76aaddd13a1aedd71c4f165fdd7fbdbd054af6342a1883178586cefe68ed2 +size 774253 diff --git a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png index b47a32b7e8e26298bef4f3e8223e8dcd5b719115..851c736ba2e41eb477f6ba0a43a8cd63f565be53 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:12bd775a0194683b6b8de7970f118a5a2aa2d51637bedfb57fd149e6ae60e957 -size 881275 +oid sha256:15df83ade73e7ef9818aecb6dd3227f7d5defbaca5f8c988cc6643b821e4ac6b +size 1252082 diff --git 
a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png index e31739e8c7c5dfa3bbc265f9494790113fc8a2a3..5d1407a63407a8c53e2536e1ac42b6046a5ab6b2 100644 --- a/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png +++ b/images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c61d88d9597824845eed340c4eb15352d6ffed5e31c019cfaf73897bcb30a5ce -size 931497 +oid sha256:4fddeeaba8405771322628d78957d88deeb57f9603b292f7431b26b4d2879bf4 +size 772309 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png index e51f02eaa3a60c24afaae250b4f0089289746c6c..a9111fe96dadb6c127fe46282ab6a2a332dc02e0 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f516f617d9f8bd10256fc71195e1592d72e3317a50a46c7ae9ba9954ba8e63fb -size 598346 +oid sha256:d06c2295c3ee61566fe3b21acdaab2325bdbd036e2a658fd8aaf02143fd39a1e +size 627905 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png index 606a89b144452e888e89ba01b1ceb3c7a1c74365..7a281cb837ea36431c7d5a5477beb1d8a7d242f0 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e5c2553831557af5d5247280631141adc8a6c6d10fd3c14373616b6195874b5 -size 348424 +oid sha256:221b319e9c0ea036d6396652b259f911b825bf1977c7d60c4f2988298457a405 +size 352479 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png index 4791078fc19c9a5fd5c54e474621dc5784e5e486..80163459622df57b2d9dd7cbb418075a6aa1d9f2 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2d662fd92c2073887dc24e13d64de1a73ad623f542d7c8c7d8fa21190045d12c -size 913057 +oid sha256:ee889672b40ea387c9cc31025066c163df1d5397a6b7c0cf4db0ae1183c43903 +size 1022756 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png index 25003db7930305874bff4e1d70172b457e71a3d1..c3527ad7a9e3ae15ab2f3148972c0e376f512d4f 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8bef62eeef440bd1418d41c842947c47b05ce900d910692c215ffe309900cbce -size 905092 +oid sha256:abd8810591cfbab35b142d176dcecbb50a66a5ed981a0d32a627663ba2dddde8 +size 939423 diff --git 
a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png index 81936e4e672085c7fba84709e10aa36eeb6e339a..3531ba87646054a92c2466ff6850b7d3a6a618ad 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ee59c8337b381607f8907d716301c164da431ad343c5ce4e11d32773dc6e528 -size 899839 +oid sha256:f7c96fd446cfaeb9867de1752f870c0e5efffd4b73c6c1c17ac642acf910cd57 +size 1012393 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png index cc52b446077a26b18a05feea4fdfe47711a34b60..8316945c07a3dc344ac0dfcf288435f6c4267ec1 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:78ef5221d4564e51e84718edfc8329f9086d608196f6b85993b73f5639c75477 -size 756001 +oid sha256:00dd614e0c2a08efc2c985dc6ef021ff041f84b3240c6702165d04c1bc442fea +size 787393 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png index f17bc12f2130695dcc70f2429547d5af101becb4..a87f4819e8bb113c23eebb136da82b9a0e1f035d 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e2eb521fa3b1c60c29f9ded7333c819a1879e28ea8f092850f3c108c2cc9837f -size 801611 +oid sha256:5f33a743eef2e67a6eeae3e89ecbcf9598ae5017bf4d39e1ebdd5d76266eecf8 +size 831898 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png index 8b4191f51406b3faaef288db32fd0a6abb3ff834..e779bc1cd634aae3280245941beec2cd441d39d4 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:04c6a3bd8146f513c216effaaa60d2e9714d1f0b301b7ef905d21a697a880edf -size 889154 +oid sha256:39ea18e5bf11687ddbdcb45228d39be4bfc84fdf2cedbe86a1418dc454385a8c +size 1065442 diff --git a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png index 4f9493ab1149bec6b352f760f3bae81b088fbfd5..52909d3cb0dd11bd3d873f1297196cb127ae60fe 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8876b271169930f005f15e24d6fa298b78ac7e81de2ab1d6923a24ba93054945 -size 940030 +oid sha256:6c3cbf24e50b5e62cfdce07d3d8eb44ec50ce0ace6de9e1579a7ca950bf238cd +size 1050872 diff --git 
a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png index 8ebe78b57e2df07857721d11544b21e7acd31c25..b4716436b8e253e8d382b8ff3fe8a196f3e72403 100644 --- a/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png +++ b/images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a66263af9da65864452e90d078257202b059c33361c6302e4f8682d85c631c8d -size 874908 +oid sha256:6529f5f155466d4ddca9d06510c9152279c810d4c8676b0f9351a72c8e5704eb +size 903642 diff --git a/images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png b/images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png index ca5378f90f8c1b3663395692fc5a96589cf6836e..f4f1bc7a873a0bb5512336f7a7bad9623b86d710 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b134b34dc78649ae5b19dfdb858e2fa5fbd2ed24b736c215583bc872a27dff2f -size 1877153 +oid sha256:6f5beef328a0ff0415131cb682bfbcb1b3a9d7e1971de05a5d47d003e12534b3 +size 1945940 diff --git a/images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png b/images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png index dc819bee2365aade9280e612d3345bd81cf6976c..14559eb366a7a0869abbf0a03dbb9ee7d669ec65 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:325c7ecd1c8a849b8018a7625d2d54429499ad219c5789a93f4a0987083d1127 -size 948199 +oid sha256:5bbbd9d15fc29d78a6febc9cb7ce8c6276c6bfaed672eb82380e4414c7ddc38c +size 1214412 diff --git a/images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png b/images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png index 7761801fb4f940ba13552d1f0103a382cf262419..894c94f6ce95c909c6c6d092420cce629f43e4d5 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b40bf7c46ae7e0fc01bd45d550b832c653612c9f0578c73a8e9b9454fd292e14 -size 1975019 +oid sha256:89e2b03b2050ae60938cff69c48957281c9b26bb11bea91e7c8f7e500104d896 +size 1341765 diff --git a/images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png b/images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png index c9e4a4e13043108b8bd3f02663b3813477d9fff3..28c68a6684b74f65bbf96609f3308f4590343ac8 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8051d90ba72cad931c2fa86652563f64f0dfb4f0250856a5d4eb241a6fa3bf1c -size 1876172 +oid sha256:1ba1d6f10cac8bdb8d4d9d694943ef343f7e7b3ef2a5186f7ebc3a7d733f05ef +size 850146 diff --git 
a/images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png b/images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png index 312b1a34898f3bc2b6fc3d61d55cb006b715c011..3eeeb6afa9465ffd394729eed54859ce035aa9f4 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e0d2a076da28db434703fb650ac65359ff4fbf898b8e2d178df20b12841164a1 -size 804647 +oid sha256:3f73a26486cc6ba1d3a18467b7e984704e56ca094a473323769602039b51ef64 +size 959703 diff --git a/images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png b/images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png index ae7a94bbd6adc41edb66034981dd6b6bf63b60aa..2e730daea54e87dc3f1adb8dec31b72e224ab317 100644 --- a/images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png +++ b/images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ab5aebc6605020fe7b9c5bada2211af7c83970787e6a6a410955f3a05346c673 -size 1038176 +oid sha256:f372fb0d5167c4c471aad821bdbc83a7467f9c75ce5368bf4fb9fb805f18f88e +size 1355822 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png index b978042f8152c204551635515ac1ab20b9e793ae..652b931835b5cd698bd48584242b865939516689 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6cc49e591cd4e65dd8477a9fb466d3d77fdd9f2b4ae4e9cc49f8ce5a4f23e296 -size 805244 +oid sha256:89ed3cd2bddedba278430c8cdfa042edd918a15dda1003431a43857543f60b77 +size 1122401 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png index 0e8f60050ef4d3986df32124878a80387b282357..553174e3a82eae6eca14a5ae802bce8c89949e21 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:de7897ae027d3b248bd4b747d70706db232197f362ae0cfc37f9d3c9a3955faf -size 854508 +oid sha256:ca06d462294b9e55a2413eb3fc46a8303bc75b35c273ed34b3a8d7f73bd7296c +size 709711 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png index 294f057a07158d7875ba72fb741ae5ace81dc831..71494a5757cf86a1acce42d1ef0ee6a706c0beff 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3391696844662216b681ba3e0028d10f5430a34b9791c814b5a6c428d2656224 -size 1202097 +oid sha256:e69cb0e23bf3618b69f4b4914503b73253e2e1dfa3b847a85bfac4191d722e0a +size 1161799 diff --git 
a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png index cc3a78dda05eff94f8ac7766d3f2ddd58240d8e4..e6f0c9dbb5408a75fc6bd68e4fdad022d21c23be 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b85e0d5aadc913d6ebadcc5e46b65277a88b1601a62b59665f47280ce2dd10e1 -size 1318905 +oid sha256:1a561facf7b22284c05f640b919bc18ed663eb66a4bcedfcbb7861a2955ce563 +size 1334093 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png index 4bdf1b00855eeb3c2352c54d162c2a3ab852d99b..e98839ebffd35f15eac8b340d29623e6a82f8615 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d3a405ffe0f277b79915e841d74dfdfbf868243fec8191369eb16f30d1ad3ab -size 1651066 +oid sha256:fe3733321b3418e2ffc0a00fa18b5894026d3274daa23389b1969badc18caad5 +size 1941844 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png index 029bcecbdd014fd36560121dbdc9b66ae80f2f1c..6e5cfc7dc13a3a063f81eeb51fe904275fea1043 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e30378b9bdc20468da368053a4075831cde070f375ebdf1ccd3f399a4a8688b -size 820783 +oid sha256:493b71c4e0e61682341bd5caae0d26c524fc56828c0726c82ffd28a871609546 +size 755360 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png index 4709f3e39679e5f6fe64f822e9420e7ed2ece266..b65439ba34a925a446de494d5361bbc55cf4f157 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c3c5ba93de4985b7c843d8ca32111d897e0d9f17235bbd8822ab220a16cb519 -size 857396 +oid sha256:4c39428ad4a601c95bec2cfe30e2a757c8ae58d049b0c13fb18344b17e2f75c7 +size 613396 diff --git a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png index c38f8e7a03c2d527ae15529962a8f97ed2e90f6a..ec7b0f6d24f3c8f5fb6329afc0168536078ca2da 100644 --- a/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png +++ b/images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1bb609110cd05a44610feb302106d842e2b5469a30a807571889d37709bad029 -size 786894 +oid sha256:51ba72340a3093d29ef8746de63b63699ad951095275eefd3748b041410dc76a +size 1104405 diff --git 
a/images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png index c39a4c53c311a7b93c4deddbce59fb2c71c7987e..09d1703c36b9236fdfaba005c34ade2887db6d2d 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2a0de58f302b6860a76af83f51ce7e02fd3242bc50bfcd498321dc3801d6763e -size 434670 +oid sha256:e5abfb1aff910e258eb2ff439074cca624ac83b759fd5de8de21f4170ff7c408 +size 342750 diff --git a/images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png index 0f0c897e16c4b88c0c1e2021fd76bd12ec81acdf..70bb6d2bea4a1cac0fa33dcefa2f4064e7f81257 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f25166ef0f4bcd84f606e6bc02b1e73d8159acfd5d409b034f271c127f3f3dbe -size 1060506 +oid sha256:9c965559f766698870f1fbe49dbec2fa905e3cc9e16de88a736984c8a92290e4 +size 767472 diff --git a/images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png index d2331f534e93a52b0155db1e0b77b4cdd75ffd62..df80ee959afc85c7f57bc5bd36fc6ce953447955 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d5fba5275c74fa8dce38a2eb35d885aa904fb1e70de7a5a9565a361fb62e21d -size 855873 +oid sha256:2892ece21c590a41d3079b1cdad6821358bb75dc42e7e8e03ac66656b47e7270 +size 775191 diff --git a/images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png index b5b4d91d857fa5a626b0af62e93b0301c363e34c..55661b4f03a65a5a480230779874efe562c70fd1 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6b4417dd8274db8068a8107964a713b1fec3a0a3f527843168ebec6da158327 -size 693374 +oid sha256:883781b22b67de3b0f54ac5ff4ee46d6d94119dc180cf294ae23d26d6eb67d73 +size 480806 diff --git a/images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png index 849edc231e3f619f2f98f64de5948d920a494723..4d5a9857dc114131da12efc792eeb47c5cecc0db 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3501f8cdec586750d685863b184141fd4dc2003a5be917018704ead162e07e2 -size 584550 +oid sha256:cb9e2c164f35ad101e669213bfe06c80591ace7ebb10ae0d61bacac3e0ea64ea +size 607147 diff --git 
a/images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png index 15b36a8bfb827670c69f457fcff587f4882389e1..15af78488f582064e4bd57853333151e7d05f9d4 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d899de9664e7b769eb83dd55a8112f466e048fcf6d4b51e6957d71a3375480b3 -size 453953 +oid sha256:131af92e115a124fcd85a5402e37c7b5bdd7eb32725368078670629cee3cc9c9 +size 458959 diff --git a/images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png b/images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png index f34b7cd792a2b4e57ae811d43f452d5cc06da8a7..23735e72b3e5dc111b0981f585edf358019dcf8d 100644 --- a/images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png +++ b/images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c599c16b5b1c921fc4240c2e8d4b458014b4e3a9080332537303aec9e96f869a -size 532865 +oid sha256:c67a2c0a1dd68848280095ecc49dee91dd43e47cb7964f20307cc32d333f8783 +size 535330 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png index 42ada728768d8a3faf09785e75ce67b63b41bad0..5928a5e2c66a7d7c292ed697acfcd69235c636ce 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5cb98ccf433a215d503c807b65d7987c914cd84ae09e355c19d1ea7d8006fdde -size 1187267 +oid sha256:96077fdd553dcc4db1d1644d74c2b46f59caa06636ac2cd3ac60cca26bc070bd +size 1370833 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png index b85b7ea51e23ca24edb313d0e89624a4e22298e7..65fa16e13553ea79be9fad9abf2601e27ba44b01 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:13caf43025cbd8d4a987de44674292e153ff0297f4ad3435f58d6bfbfd4030fc -size 1191039 +oid sha256:5b092989695a7b4e4f4495d21ce230439eeb0cc82c51766300103357da073f01 +size 1599305 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png index 302f3ba07d2c3ab960aa1ff7c44b24727048ddea..d59c5451db8389351e10cd2cf246b14b613b25c8 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:314a17ed8e8bab578f68dcbc051752c1ab847cc6617aaf0763c7529e9708768a -size 1188883 +oid sha256:ce4a8acd81ae46e7b80e9e438499b6601778ca68b5d4f3becca3efba53ae5c8f +size 1765274 diff --git 
a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png index 618a29cbeb3ac183da0d5a822b1a444ec328fccf..b5990c10d4d15fb0ff8b23cda48f9b0db1f8b1cf 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:deb88988a7396aef1f5573971e2fbe9490bd9ebdff22bef42e15e755ec202cc7 -size 1181337 +oid sha256:1d5413ad8da7fcea4f942106d16c669d85ffefa1a6948c7f38229a3a659dbd25 +size 1285053 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png index 15713d6d36fbdf4a583c3fce55455da86e089df7..13b404ac8b9768f0f4c4cdd1614219cb77facff7 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71b3427e3d4ba4e9a7968b3fc2ec8f05b576a4c9ab2c23c49ac6b888d892c73a -size 1187263 +oid sha256:04b97ef043f29c6ac26c48c9514c57bf04045e35e83d0b3b797fdb98f7dab593 +size 1678123 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png index 13160533ea2e386295e2c6b1fb19dab97fb134cd..2ffeae311c30eef68c387097953a2589533d9818 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9dd05e4d5ccf5b3aea60d0f74e2035b7c6b9ae51f2db14e807bc98e6633313a1 -size 1206950 +oid sha256:3d57126c8ae28086208a4b2e85e8be3dae610daae92ebe3a97463072ec79ccee +size 1907789 diff --git a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png index bf38fd1a0eaf7c6d650122c38732f45c7e990d51..4d6dd9fde675626a007501e9cdddb08ca944efcb 100644 --- a/images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png +++ b/images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8077f7459be1ffd2f62bef908596a2e60997beef3b922779003172ec11d2cd5f -size 1136561 +oid sha256:8a5dbeef6c70c52d21e7be7e77475100d8f38677cf6defab2ebbbe746fffe01d +size 1884662 diff --git a/images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png b/images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png index 25577c8377cdd996a274a868be3721e07d3b382c..0cf4fe89fa6a96e8867829e0ccc0f3006cd686eb 100644 --- a/images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png +++ b/images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:702e087a5acc7405178115b1cb7494f8b07c378e32670c8467968a0d294f189d -size 1048666 +oid sha256:e20faacb13f3eafcd413258b27a02ef974a7602737cf753145565a3ddc42194e +size 1142678 diff --git 
a/images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png b/images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png index 61321f4013f1c735e4f246360a986107049729eb..7115cd35391ce186aee31cdadb0bdae3a9b224ec 100644 --- a/images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png +++ b/images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0804c5ca87dedad84f8ee854c4b6f586b2a1ef26696cf89bc6fdc46349a2d3a2 -size 1531949 +oid sha256:9c8712603a367b5fde3fe1a9ea31e4c1db231ae8c79afe16a6e0ab434491ec8a +size 1543930 diff --git a/images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png b/images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png index 985c8f8f08783df3137f4a3adf8f799df6538362..b9f9460070e50a154633540b9864936a1a79f950 100644 --- a/images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png +++ b/images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3149e65122714657dbd5bd07f49a5b3b7ff9dd87630eff8e2b4c4bf31d3d6bf3 -size 1891395 +oid sha256:ce3493f2e1b5f2583e6009d4f2b7ff08475ab655794521cd2c29f2e0d48fbb4b +size 2085821 diff --git a/images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png b/images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png index ebefa6507c80ef5c08b9215f7e8e3a813f25a122..c7d0b0b26bfaa5e06733d29d12c473909ff46a3a 100644 --- a/images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png +++ b/images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a312f61f861bb44245ac0638d8b2ecd68f2f1f79ed99716a316fc3ef2b83b46d -size 1549158 +oid sha256:fbcb5e4da77164015449c0913793d0688e1af85a446b3d9ae22c113626513050 +size 1323415 diff --git a/images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png b/images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png index d311f3cf27d1ea486eed54ae7ad8d95958a21ee6..2b2f52683b1f59fe59d5ba6c139ada241774676e 100644 --- a/images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png +++ b/images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:358785b7d26c7bcf64d18cb84d922f6b9601a2e201aa088ef36cb05f194b5d50 -size 2434486 +oid sha256:943ddbf15b0480b4906e31657ca1242db72887223643081e1508bc4f5d1db109 +size 1580396 diff --git a/images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png b/images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png index 65c404355a6d97cd4a1e5e69382f3a5df06a3543..181bbdc8ccdc773a723bcb654f5cd0b6e2e82a49 100644 --- a/images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png +++ b/images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2c414503533587c496f4aa02c32f5ac751f65f734013d757b0f262a727e20c74 -size 2893038 +oid sha256:47d8f32934d503aa6db5bed12c88baddb879c3bcdd440532d5438d8a5551621a +size 1999163 diff --git 
a/images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png b/images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png index 11b7b5e3e506b1d9b52a24fb0cc84f985c59204b..111d567efb2c4d77ca393708f46b5c4623e4917b 100644 --- a/images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png +++ b/images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5260cd5968d546d189a1ad9063d34bbe9a22f1b2570a03dc6ad172f607c8ac01 -size 1868901 +oid sha256:e0494bb3baabba7d61d76e4817ee728c0a42d1effa75e5ba80f26dac938ce079 +size 1524602 diff --git a/images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png b/images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png index a707eb0a7089fdcd58adc7b8b2038d2a9d3bbeff..56222fcf9ec1c978be314ce71a2c2438534237d7 100644 --- a/images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png +++ b/images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9ae5f5b9c43ef2c25a7d5a076896c0dc261881a77c1ba3cb3e101af33ce290fc -size 1834940 +oid sha256:eff0540d4b53944197245a506fae78e9611b24fb3ef165e649d5dd76f3048cc7 +size 1204279 diff --git a/images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png b/images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png index 12f89ba7735c6249a086cd5f26b685498f7c67db..8309e0489430385c561eb60355fd483bd53e5eb3 100644 --- a/images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png +++ b/images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fa046662b5ff47eaa8de289a0d951159ab8f66ff2fa73151d9167ee32a45fd97 -size 1832918 +oid sha256:c7a3dbb32c1d835ee209af7eed834e9c6c9c478468b7af8b9b2f45a609466f76 +size 2475202 diff --git a/images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png b/images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png index 83431b55fe79a9e73697fdc86fb3d8eda4028c0c..4675d823a30f3ebc7baef0dd92296d2be9f64a11 100644 --- a/images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png +++ b/images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5c5ff2822c2c02869e321365754473bde6c894a091ee6d16fabab2243a08af71 -size 2283271 +oid sha256:edb758c01c3397bae8aec1434d3c14e55c46583d34846e0681f363cfa795418b +size 1837568 diff --git a/images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png b/images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png index 0aae00c7ebffc748df2dbb55117beab870a17d33..60cf3ffc38b6ffe0e3516dd6a02a94d2fb8e50d6 100644 --- a/images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png +++ b/images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d39948c84b0b34b0292540f05e37c5680bdb7d99221232fb8812415f50920340 -size 1772374 +oid sha256:f016c3ae12a05b709e527aabaad58b13f0b6f78d9b2c43adb6cc1de1ec4e1edd +size 2294540 diff --git 
a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png index 7dca0f067a44c7b8f3bea97fea17dddcd7bb468f..a7acf9f9e09afc64d0b745cff4424edd19ba7df5 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4b65e7949c39e73438fde1eca5a3fe65d9cf898d9a777151ab2dfc7c3d29ad6 -size 2030477 +oid sha256:10b9bf491dadca779de8ce48f18199ce2277a52bd1209c12db3d5d78aa5238aa +size 1914089 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png index 8cfa408cb09bb38c09c9258c1d12e58c2c65076a..986b08509dbdacbbf68f4134ca00fad25fd7fe59 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cfc4d8add8c0861d8fca5ed4593a7e35980b118a7ff1cf87e5514c6efcb1310 -size 469422 +oid sha256:2ff919be1afc7c983ecbb33dea672ccb4d7ef76b280bbe33fd7714a55517ab41 +size 625205 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png index ecf2b43d8ce6662718780304e0bb578e2f397a2e..b76452b2f039c253f1800b6a33c1d800b58d122d 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d173a332dcf06153eb8d99f4eb2b591b53f4d44a670cc6c6193af51ee74b5ecf -size 809695 +oid sha256:c9e296acae05f5060ff24101a7a1e9cf4c06645b3d53a590cd0279410bd8e35d +size 727140 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png index 57a2633789c17df52575f579008dc2f7b74baeee..babe04bcb6503d001c73e69993ccfeab0c1421e5 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6afb5089b876d0f955c30b5efb37980df4a8c6b89331d76cd5032712c608c641 -size 247611 +oid sha256:d05717246a0a78764ba526b154ac4397418ac171f69485ef7ba6072615f8b959 +size 304714 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png index c615ad9bcdd8f295c1d5d31b05260eaade4316fe..ad2ea1f80cb8b1e09f7a59f0f0a0b44001b1ab9e 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae5a351e970918f53def75e47b0aebb8cbf22fe9c80f1c2baf5698615d5f75c7 -size 251209 +oid sha256:69e670901fc6f19cadb530855725b5ec20d6739902da775c30599fb3bed14c59 +size 316819 diff --git 
a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png index 8cfa408cb09bb38c09c9258c1d12e58c2c65076a..f6c3d9e25e1c2fc811c6ac0db8e91fc7375e6836 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3cfc4d8add8c0861d8fca5ed4593a7e35980b118a7ff1cf87e5514c6efcb1310 -size 469422 +oid sha256:b4646be7f7a1de6c0eeb6d36eed1975b3041185852c19da59daa03a9e37c2f1d +size 336124 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png index d54dcbfd334dc55ffc65c4d8aa1bc523c516e64f..0b9834fc9aaec1407e15331bb7fcddd6672bc4b7 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df89f6e3ce7f68fad2a48afefac2cf16e4b4acf21d3afd62dc9cdda255436972 -size 345071 +oid sha256:fae4f4e627d15274cfebebda8c165b611f251219519349c5d3fb86302dcc17b5 +size 505118 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png index e52b48e2853eb85bef92769e858dfa8e07ae9d0e..f31e7d0a7cdb8e0a81fbaf13076d28c31acc80db 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b57eafc9acd7d7fba6ed0461fbc6a260d378847586f05e5aca4eb3fb5622fdec -size 326571 +oid sha256:a6eb945a96ccbbb3e66fa6e226b5c98eb89be7e00494c8d12f55a3fc506cb2a2 +size 326022 diff --git a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png index b8cde827acddc0bf41de530e83807dce0b7777fb..952d1ebe501874d64466d8b686616bb2828ac53d 100644 --- a/images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png +++ b/images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5317f4ace7fac9b2e47ed73a333817fe61cc1ecd8309822d910f7bd1b4822b7b -size 697699 +oid sha256:2e68353f712be43535cc316165bffa2be7defec79ead156e71e828123cbc3cff +size 475363 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png index 030a16d08648b0447cc1ad1867c4b366c0207c14..854c8b5583f1eaefb135c94bfaf5158ab846b538 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:744ffb6a25f916b112f4dcb02dd806b7ff2393aa32d2077c970ac4b808ef39ab -size 647994 +oid sha256:b80d487429cef5ecf302a0e9589531d34a7b865e707423713efea7c28d49c082 +size 742466 diff --git 
a/images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png index 9e99ed70aba6fc1db57da478365a1fc99b3c8977..ed0a4ef6355a106a8f8dd2c80fc0d7169c195e5c 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ef48696bd29fd708bfdba8821cf907777b3c8f47953b9a675f8dd2f0b8fa512 -size 608149 +oid sha256:8e67b6e65c981a2f96c0e0791991dff7aaaa96ed593d24f2375e7275327c9071 +size 733729 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png index 10ff96a1ef680dc83051905f5f7b7b29a8b4562a..5921751c038b5e8d3115c15d4f0a969294ab6570 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4dc1e64fc385eadeef996eafe526a8a0e0fd35c3603122f477adea5a0e217279 -size 744988 +oid sha256:7b900cc53f170272415f1cd16c3afc1b6471b50eda56a6dbdcea874c240801a2 +size 592402 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png index ce68514b60a9e7d32c92136557cfc62180948beb..f6489f829d344ff50c0ffcf65f863e2d405ee9b3 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0fdfdd32061fe17e92d245f4d258311613a29edc5f817bba38d5adb1ef1f61df -size 1716258 +oid sha256:96a08efe75993e8d70582806b238c3d6f3a161457dfa188e82ff5b96bfa27dd1 +size 1793995 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png index a49a8e6c7862d85be0b85f526c513fb6338c4cf8..6c08764616b6e9c33d5fa623a4448c91543ff0c1 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7f4c3f094b6548010dd84e6e68901843b2a15aa170f86c67d79c2d0f97b13c3 -size 898994 +oid sha256:e040b9ae74418cf1c3cdbaaa1fd7233b908f9f3ac28b32c5e2af2ade70253ef3 +size 788024 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png index 6dd5b4703eee17bf72cdb883b27c7d3bb3f8f4a6..91c063d70e2f04befc21fc6d6c23d58604b1d10b 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5939c9e6c0d3d22d2e1323620473d6b525fa2fdc8c6a96b4528e4451df22cbcf -size 690693 +oid sha256:cd1d71da6138dfc1d04f9cd8d4016028e694fa522c283f713443802e314fb0d9 +size 638981 diff --git 
a/images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png index 0bb525bcfff42943c1254c840a02f2dfad950b70..697b5355c741a852f65194a9b8e83353b843fc29 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39420848bf94bdbbe038ee230f464bf5626838d3a20a67cd44fce29a9ba69137 -size 516943 +oid sha256:ae19189d4a113909418e420e93a19b0affde21516e40d8be47198e079dc7efbd +size 467716 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png index 183b086e89ae076bc12089579cf41ad476ae6f31..c23d59ee1158294d99d3761c29cea0aa9a2f296d 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:52841154e1e700399df4ea57bc5425476c80c26919cf0eb6ed5326034ffce8aa -size 1087445 +oid sha256:96b97c736960d03eb4576363f71589b7570829b268b09bda8113de66c4833b7c +size 1051267 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png index 8216b4ef7432a5eb727aa1c72a1c7dba5a66b6ba..5199909fcd2a17a4e8662d34acadbcbbd05da278 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66e5b5cbbeba700b848fb99e1e0dd90ee7b64ba1c85e1e2db30141c18b266455 -size 810865 +oid sha256:1d3ade93840d3a8f5f120d03115b487e322f3e355bcf3ee3ed09317aa816992c +size 720651 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png index e450a68e36e40ff58f420bcd0c874a0f8e21290f..aa5d8447ba7520655357dd2819ed3457194cd16d 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c4b711a73d7e1a4a2ec525de0baf216120ca8a139545e09f22006540b890dc2 -size 896025 +oid sha256:744ad63332776d599306a243013160b66f45d2a4b8843d443ad221242b61c4c2 +size 739461 diff --git a/images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png b/images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png index 19eefa76ba6037f8566d901c3833addf295c8a00..f78ea2ea57efa6bace3f1aecd45cc3f44aca238e 100644 --- a/images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png +++ b/images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1330f2a528ae5069a436e4d86171beaad791ebd82eb5594a75635671f80f535 -size 1122389 +oid sha256:ae0c46511c123502a0d9c227741726ad56cc7aa5c9da23be4fd4e725bb97a897 +size 1129399 diff --git 
a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png index a06273d89fc7259ec9ecd1e83472796e829eb33b..201443306cc097a473a2c39474ec3988985fdba6 100644 --- a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png +++ b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5bade1461ad950fa6e61aee62822cfe8cc90d6b44cd37573e0f28dc376fc6a7e -size 2284220 +oid sha256:b8b9a6e588c76d809128149b74775859b07b08225ae8aea4cceed1c907bd10b8 +size 935927 diff --git a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png index 35a62c7e94f24274c6840e5315cf654d8ae9b0bf..dc8c15ab17a4cff3f53d33860314b92b8925b5b7 100644 --- a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png +++ b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:28a2b374fbba99b07cb0c41aef519ad5f8252be595f8336b6c67c1b946806ac3 -size 1225964 +oid sha256:1326b0ffa8a42fe6adfaefe9774d483f3135ce48ac0b46421ad9b367469824a9 +size 1156127 diff --git a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png index 070a9ed41b8af9d4c5018dfd86a3b5cb3c64d70a..3771a7366efb6035ac24d66c2f6ae9a4abe0e65d 100644 --- a/images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png +++ b/images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f382b120be2c79e58efc84828eb9dcdb83a5dbd49701dcf064f0ed11320cd5b -size 476014 +oid sha256:38d67153e13767a65e42a0be141ac6285ceda853df5fea0aabb04f362f2072bf +size 641951 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png index c6ccc9c64fffb6a3c95b51efff951633589d8326..f36c7bbdaf9d82cf50382e7fb47222d08eb05629 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2288587476bfb60bde3e874711c0c932109481268c073ec1a133358188f4ac68 -size 989024 +oid sha256:41a62a7a4442408c88121fc966787398422a2a2380c9d2c5e25f0a1be8b054ad +size 545225 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png index 3fdf868c885c825330a6a2f071dfbfb3a828bef1..8712febd2a07a4d8f06b1c70fb289498420aa7f2 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1a1f4673a858521a891b1b77266460aff3455e201e48d3ab67e6c6d22a28a7a -size 368453 +oid sha256:797b3802e81f4127a296af4e56dbec7a2a1e32821f2f9b9065960a93a4980316 +size 1780525 diff --git 
a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png index 3ee64614bab2e5525f4f5b317b5f81b0eba950e8..cd957acbc116fcee1363c56d7b47402e69490f50 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8a908e5392adf7af479210f869fee2f40a5aa58d92cb407b8226aee973db67be -size 1037216 +oid sha256:b609d9ba679c31b59cc5e025c8ee0d3841fa5ace70c7a563d927d0aa5a7f2176 +size 835200 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png index 21bff9dbb4a2029231e9149174c5f5fd174957a6..f0df44fdadbfebc441c882ffd74f089705fccdcb 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7248d5946cddea2fc1f0cd00e0b5c488704cae353f600c830b15bb6f611a98f7 -size 495450 +oid sha256:1257c7f3b26547751b927e532bc6d8abf74c332de5d5e0f917d75f045a0c58d9 +size 856670 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png index f93cd2f67a36813e933f1431815339eb3628716c..25c937136fe37cff6bc865624e71ad4248e001f7 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c3331e5ac44f3252234806e8977995351051bad76ce441d7848f2829f783b98b -size 1188147 +oid sha256:2a5658dfa1b78ea481c168cdaac350a5baf2dbed04e1a39aec1554748e437d2d +size 790192 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png index 64b5868b03630b68f9cc23c5010dca21e693d912..f502847afa67e3eacef193aa5b0cc3ab2cc7dd12 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f19f259552073b5e24849db64519842a3fbd743833718291a046aed09ae6e09 -size 1227801 +oid sha256:64ae562d34f4e5f7b23217398ccf979954ec31d7bf5a21611647671980f7e51b +size 740604 diff --git a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png index 94eb3378b9b6cadc009d910700de4fc5dee1acd0..45ed0be15a89e3d13133626c15f5049f8474390c 100644 --- a/images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png +++ b/images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8a012d4646296b57b8f603a35f4fd6551ec208a08ab62938459c46aad8f3f54 -size 514731 +oid sha256:3d28d773909e18716ffd478708ea05327ccc172c9e7b8cedc5ceee61b56e9b23 +size 1898048 diff --git 
a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png index 9aa8e3f639b8efef495025f681d20d4d20fbf026..64bc2ee79f109f360182e76d70539a3ae6dfc98c 100644 --- a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png +++ b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4ac92e7ee3f77eb34a8fc4a674e215db18ad7ba9f3b6f7e29f36d67151419eec -size 1264689 +oid sha256:3629c4615870d990afc409fd1690924a70375f20e14b35e88a2dfac338c34058 +size 1241499 diff --git a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png index 30c207ce9cd6655398277ef939fda22d9e19de2d..5bb3de2b169d200b25779d694fc0e8acca294727 100644 --- a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png +++ b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bbbc6aee1d2dc2dc45b20e76861b372896d01f14a5d7a8e02b4cbc7d2beafa76 -size 1275898 +oid sha256:e912b8b8b7c41860147cbc620496eefd6b0c3ee255bd9cb52b819a2553ece21d +size 1312132 diff --git a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png index 7eaae304250e713cc5f3c73b0aec8be3736fd774..83ec70994728b8bdb25e1f38ffc9dd4f5f44aa87 100644 --- a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png +++ b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:580f45b5f9efea9c5d8ec8f29d8d654aae24cf3838c8e63a86debd52b43e83b0 -size 1882185 +oid sha256:deae4180ac7db8469b91370ccbc199c8932eefb1ad3498f282a3c9fde685f4f0 +size 2012431 diff --git a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png index 6e0774adcfc54e4de5f3c8dd54b4281a5deaea52..62dc7e3d9e097a4af9ab4863f73ec5fb6cc076b0 100644 --- a/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png +++ b/images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fc6e5f8c59bb5be663ada4318030a4385fa42b9fb9d7af3f62f6554d17c2fcb1 -size 1219893 +oid sha256:25d19d0b5b4c29f03115acc8841462fd0c8337ae9ca73030850d0ee1d30a8de4 +size 1344827 diff --git a/images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png b/images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png index a1c1607ba5b4fb50477e726288e0413cb69506d4..3148ba565d41017b6be34d3cda38a4b2695c0d8a 100644 --- a/images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png +++ b/images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93d827d9a0fa6e5e0a99882feac226c47b89f979b5ceff3dffe4262231b52799 -size 1968760 +oid sha256:71efc43f10f617306f5f15e019d301788142e2ac3b3a6e4c830d9a1f492a41c9 +size 1656360 diff --git 
a/images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png b/images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png index fd77d6e6573e6fac390b58b7704203807ef1b3d4..f03fd4dfcace1990300cadb9ade43eb9576e81f5 100644 --- a/images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png +++ b/images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:93fd6bf9cb3f70aa0bf7dc7fa474d71d0f7837b3d1fe0f19444b23491cf2b13a -size 1680962 +oid sha256:1de49ba874535daa671e77ec6cca8c3d829ce9bfceddf07654f888360c8bfc4e +size 2114668 diff --git a/images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png b/images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png index 0071dbf77de5fb13fcd64b2bde59b2c2c07fca00..2639f71c6a31e79762ba053b82765d933787054b 100644 --- a/images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png +++ b/images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:55ef706cd485ba88e3b3f975911b3d9eb0cc95f3b3f5ca71df11b10abeeeb21e -size 1598949 +oid sha256:7fb0530e1b2fc63a1390758741a9b96bd02043cb4e8cf3e05e0e05821e84f06f +size 1941224 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png index 984e8c0169fd8156d1d58943fb5185c5ff9101a8..eb4029f0692351f747f12e593a6e7ab2ee61b43c 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cec5fdc737c6e63f1e87678692d6649f6f0bd9c56de849b94faa43b9eab5c9a4 -size 737241 +oid sha256:9ea0f1ebcce5196d874898d4777e1630e8644d72b9f584fcefef1778257f79db +size 982443 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png index 9f9f638eafc844a921d7f6967d719c8d6522d435..b2f8a5dbf8a25c6a95bb5b9ae84db3cb8b24bd87 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df0479bdd31346eea995c9257e02af5b94d5cce26b1e654e79c28451d6b4bf32 -size 1062936 +oid sha256:aba1f96a9c6c2fba2f2d978654717230aa447a4b2230cedfc9f97a3fd077da4f +size 1002274 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png index 32a226a5e0e38214b84d07e4c7a2e6edd625dfd9..6dd8b53e666256776a616f205c196ee5fb0baf0f 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bc58d869ed73524fade7185743fc313c7b2ec078d397c5478465a46cc48987f8 -size 773432 +oid sha256:40728e06680c9c24318f242f6156acdc1b344495a7438a8ee8fd7b1ccd97d222 +size 1197044 diff --git 
a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png index 3e88348ac2fa9926819b79ec5e6969f205c94bd9..0d007fa305d3cec25eda973d6455864ec5b26d8e 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cae2ce7afd30d83cedd2f6f4460a8cfd3eb5fb21e1ed4bd3ea08406bc93f3db3 -size 619405 +oid sha256:75c210a77c24b07cf95d8f0b69b793c3f5601dffd8a7affc2d6b22113770590b +size 854331 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png index fc328766cad1fa3f0d6c4634f9f182d6bb61364f..d29c2168eb7edf3eded064c23ed50640b0d24aa8 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20d4087ca08117fe583ccd355e68581fd786836c1341bbe3ed0b48071bdf10be -size 907845 +oid sha256:1372b920aa274ba31b5085e88296a9a24ca9886b8296d609cadc286b08b3a86f +size 1152650 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png index eb963327f7a9f25854735581d8e70d131e5d290b..8caf7a78f5a1c1eba038bd9a52461dfe2ed04cda 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e91f2f1a344c5cc712826758182773ab83f0af975173cbaa3233cb409ac95616 -size 1061223 +oid sha256:43643d7f4b007c259b34c798e4f4dca846b208ac1189a9886c022fff5b460532 +size 1061907 diff --git a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png index 3ebcf55499844e5796070db2c2ff72c91ad5413b..c7b72f6cc2a28dfc723e8802291bb3c5ba5ad48f 100644 --- a/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png +++ b/images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:191fbb11fff569201673987711ed3a5c618d213cc412c575a7cd99f42a6c2093 -size 499118 +oid sha256:459f0b6463099a3a9ddb1de62b54a9c51682287d2b26ece7cef58054d22b0221 +size 546923 diff --git a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png index a72cbe1c21107fddfc2a584b42af674398439996..eb67d31c8f3be97e06b6ace05afea6f3b476f095 100644 --- a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png +++ b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf263faf7da208d3068a4336cfc7b2c62acf8c7071f2ec47c6412e56d4afa1bd -size 988353 +oid sha256:5081b452317528c630ef200f1aa26f3c7b32c95a0a2689d8ecfdc174ebd26d7d +size 735077 diff --git 
a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png index ad39054e2578829447dce109a29d2288377dc30e..c0f193dbb9e28e557c73753b08eb1210d0fc5003 100644 --- a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png +++ b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:70fdbd77c51a70076150382b8b5eec5ead7d54de7dcc3c1e618d25b221f45389 -size 1406782 +oid sha256:7c1d7cc4ea11130e7032ad67a3b7fca6e01e55450dbad33c7847b8fcb9ed6f3f +size 1144720 diff --git a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png index c14657d642fa7ed773f4fc47e59a03ca223d4901..150e9e534633769ef00e9776dd2cf8642282aa52 100644 --- a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png +++ b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:446280a136c2bbcd8ade224871527833e51ea69aae752218557d6fd7c47288da -size 1424817 +oid sha256:59b0b4b7b71265dd3441f46f84cda88b97290adb4cd33af13d3f5b2b84874391 +size 780653 diff --git a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png index a72cbe1c21107fddfc2a584b42af674398439996..3bcf5501367274b0ade77847348a5107dc881de9 100644 --- a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png +++ b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf263faf7da208d3068a4336cfc7b2c62acf8c7071f2ec47c6412e56d4afa1bd -size 988353 +oid sha256:8b8b9742710b2ae8aca67fd27140924b053c4424d9bc6b701795b3304bd46c5f +size 658550 diff --git a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png index a72cbe1c21107fddfc2a584b42af674398439996..529ba5a3417c55d7be49d7db959245d54cfb34f0 100644 --- a/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png +++ b/images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf263faf7da208d3068a4336cfc7b2c62acf8c7071f2ec47c6412e56d4afa1bd -size 988353 +oid sha256:6ed5984f92e348f7f121ece467e42bec8c5e1ad769bedc12626da7194eb65cf5 +size 504834 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png index 3b760bb37ab83a7d74d3f06ca43715465cb1de8b..4f601c00048b1537c100a5ca6a825828c674a930 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b3efe03af193abc997997256d3d1f5ab6beae094af2430ec60bc2593dcf4509 -size 574908 +oid sha256:8b0a7d127004317b90dee313fa6f212a598378ac0f8d0ed5d775a3f4412397b2 +size 498195 diff --git 
a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png index b4e5b6ec46131c09c577b145377d433f2c0a27d7..cd957b4a0f74776cdf25fe073c7aba9efb9ce905 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eafa1d5b6f8a50b3f37ef5f685027c41ebfdfb91eae42df39fed93e777971af6 -size 630542 +oid sha256:e75642587618730e09e7a2a6792bfbc620c761fdca84159f737ba98b4bc5d7a8 +size 906498 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png index 05d44e74d4e4699900f630df9b6747f9a5440ee6..cc924d812bee9fe1cc661754d99383a5f8af2617 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:858dd638b9bcebea325645a565cfa285ebe7b8d27ccc0093451aa5ca1f5b06ca -size 602922 +oid sha256:94e4f998c4248a26c519e027cf90c2f98634b78deeee5ecdb8f4a5323c41980a +size 629176 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png index f27f25336ca40747141ae6d48718a8ef62499a8a..b4c7fb4989bb40e0cf3746df0d4b3d5a05c6be10 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:66df6f66c860750f389b372d5c257af560e3939616e3a7c9fa5397ba284472a9 -size 626605 +oid sha256:dba755f54032ae6b6b27d714f2f10fac229af1c62547a53198a1c4c1fdc3d054 +size 347163 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png index 05e636610b00d2204536770bbf58c460e1ef6c52..bbdd4e700dacd1953d88baa830e25e6c32b86531 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:adc1877b4dd85ba2da7500062aa5f471761282218b2b2ae75130c6400cc3b5dc -size 599670 +oid sha256:b2fbc72b4446909e3bc4d6b3dc78fff400399c90b0b95604f5e36d048f684437 +size 1138032 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png index 38c0acbcf6176b55f6402d0f527cc9eec8792512..00b688772a1c5363f42e799ff3c3e1e83b482861 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aa43157912755e3d281641c3845a6bc4e41f9c62f0ad0e48210e68a1bbad022b -size 685442 +oid sha256:8d5328a83df640307695113eef64cf08d23663eb22d1ac0b87aa7733942f79db +size 738571 diff --git 
a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png index e0c0facf8f2f1e05a85514e3a74025cc2a96db66..56f1751ba5fe5c415e847561d040d08d721b6bb6 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:861a7a716d55bb4621436690e8db9b2b4441044a950f5c03f2bdbce39362ad38 -size 596293 +oid sha256:7fd296688b02ad0f778898b26cfa2f1d8fdbdfb0f1ad991e07837a0659d3c48b +size 852990 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png index 6996a00dd57355edad689503c2cb97992e4e86b3..1845490d69f8d9dba73708543e74ae4f77fd1548 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:065c665b52af7a9001bfef80ba7f8274c49e7d52fff57d36143b73ca2292f975 -size 666503 +oid sha256:19ad10691fdb979a5108272eaf413c922740d93c1c4a88643d35b092a2b2d650 +size 908027 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png index 87b41f2d973e2bfa9276f1254718ea80202357c0..c2fd2daea598a33e9e31172be6ddf7402a8872fb 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:201799b5c0aece0b365a77d153098e3639eef972bc1d48344881754d12de871b -size 626113 +oid sha256:3d70bfd3a29ed35eb104782db0bc76f6fd2dafcbe36dbb94971267a20ca8e506 +size 777702 diff --git a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png index e4cf34ad4a3f93963eb858cf7afe082f89d2e4c4..c33eabe113931ea7f09274134476a783c3810ce8 100644 --- a/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png +++ b/images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df47c2650148c64e67bbec27c8a4bf23e858e96e8825a46b6ec6a2f5dac86466 -size 449898 +oid sha256:a376b969fcfb4e782664b1a629fbc3ae179360c163d17aee6387dccccfe2aa0d +size 154458 diff --git a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png index 0974054f7275b8675a6d72089a765248947afe05..547dd633814ba8a4ecfc9cc7576bb77fd6429b4e 100644 --- a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png +++ b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01d38b2031b1f1a157e2d8d417b88439d12b464ced280c55f699ef29c701372a -size 1257690 +oid sha256:67518de1b823d96c2992f061693d662d245fefd0ec21266c9ddf2f23d259d233 +size 874944 diff --git 
a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png index 3bed5b4ef29fc73edf8f55b3003f6baae0437888..62f7bed00847fac3be5dc800a1a0bb0a5ea635be 100644 --- a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png +++ b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8eae0927fa6dceb07a4c4d0c20aaf4c45ed0f35c02150eaccb99e47c5f961114 -size 363891 +oid sha256:c50c98a88be0420b4cca32519eae1951f096ce595fc478699edc5f84a2d738d1 +size 352384 diff --git a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png index af3ccca49ff3d3baea7c80c8d11295e4bcca7f5c..350e3f6cc800650bdd1bb7301aa444f5c9a8aa62 100644 --- a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png +++ b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:976a3970bc52eb98c7532b796a2a31e0b2f740ba4bbd7d3339236cf91f96b890 -size 487225 +oid sha256:ed7f5c018ef41c60532c0847a924892c0b0d42fc9097b981076dce58cd10f055 +size 545843 diff --git a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png index 231e11bf7997b135deeca482a0500f4381951129..0f20034a864cd08c30950cecac86d3c1b8838b8a 100644 --- a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png +++ b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:06ba7e32f8ddf89508830a2dfa9685fe702c40cb7df46dbde976a0195f608ad2 -size 474317 +oid sha256:f6a3e3b9785f0be41c1582422def5121d097642657785b61b47f7381a8457f73 +size 465236 diff --git a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png index 04decdd1ecee7fa95e882b6f3509a4d814d13ef6..3bc471ec171b2551db002a2ebd9686491c6333d1 100644 --- a/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png +++ b/images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45b61fe039ac9f2f277430ed9d0d1e909c1709c3970cd2a90a1a3c716f8cfed0 -size 491658 +oid sha256:087b63fc0cfccee9b07150f03b540fe7cbc362df0ad3e586055d43649a6962bf +size 306562 diff --git a/images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png index a36976085cef0bbe4cb0c241573460b4daa48a3a..c60f841e17738882f8db48ea3ae98c73a196bad2 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6d732d05dfabec1ece7781f574d6b324f70b9738ed59f4dac5b3985d315416db -size 2464304 +oid sha256:963a3a32f8704776644aeafcf3b4e6700441b81c675fd1b1af2f99caeec083fd +size 1763519 diff --git 
a/images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png index 0a0b7b752e8b8f56d57ffe66abc2d0dcedb9428a..4242b2a56cbcad7af55f73ff23d9e2229860cb70 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c9f629b383adcefe551ca048d50023362236839e23bcc06549682d96c079fdc -size 487554 +oid sha256:a017c3712d8edbfdd36f8a738c2aa4cd19b039206c841584b209a9f749e9324b +size 757997 diff --git a/images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png index e85a9abc5eccc9cbf32e8b7dd205551b4c19c9e6..c2173f1c1f16f2ddc0bd5fefaa10e4ec4502f639 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:64e94c82fead8aafcf6b6d2c4d06ea952e27cb562a4baf64ae750f0e2d2ab8b9 -size 1293566 +oid sha256:ccc9a3fd5a1ab96bd6c53b8805b4a3753947392be26d4cdf26435450726db66e +size 805652 diff --git a/images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png index f69473b1ec90d9ec7632859993999e22107aec95..60516563fce625cec223ab36f5ca04de89ff8c78 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36b64c7adb45148a46e809ed90a1a75cb3e8dd315e9c895ddd16249be7ed58c5 -size 729564 +oid sha256:e410fa78241ff8dd459690c336494776cc71f54208a85b7cc3e4e3353ffa9826 +size 1504276 diff --git a/images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png index 0bf4ce1b1c9ad6798d30e382e342b359694ebb12..43d67e2c0c9e1483c1faf316ade589e1c47066d5 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:871075ad95f1f2e6f9ef9f1dc0a53fa08e8d1268abedef1d1f90e05a0a0f3d3f -size 1311322 +oid sha256:ba889fee27b521bda16ccc32fae5f6d7e928747a5565c9c80a1a857bbcac967f +size 998568 diff --git a/images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png index efbddbe893dfe1e65ddeffc35cb3e3b795374f9b..3c51dc80c914df7d818134e8ec326b35d7545fb3 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:442ff152e586029bcf8a4856d470e5ead07e7941845302f73e2e8f21e8b970bc -size 665506 +oid sha256:d110e581d571ddf5846a3fa19be2f88dc0be241c4e2fb9f905754a58cc4551b9 +size 786463 diff --git 
a/images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png b/images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png index f69473b1ec90d9ec7632859993999e22107aec95..7b90b4c235a252b6f59cdc952757f8039d60ecf2 100644 --- a/images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png +++ b/images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:36b64c7adb45148a46e809ed90a1a75cb3e8dd315e9c895ddd16249be7ed58c5 -size 729564 +oid sha256:d2d329607d80dc58bf81bffea8d916a239e53a7f765437047eefe2f64521648d +size 1243505 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png index aaffb45273aa5e9430ab976c6df2c9cdb39802cd..3c98cbac6e0ed0852b7beb72087cdcc32464b7ab 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9472da0ec47910485b21a5fba35e96c010f2fb390d9e15df91b348fca5a00d0f -size 946680 +oid sha256:6dc703770b7ea61134bffd260e0d392a0b76c328e004e039a12b90cd9d0a6c7f +size 1113354 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png index 0a4705f1001ed143b3f46fc1d5787cfe60b9c399..940b06f358ac616033912a01ad55b849e2c2b527 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e83ae29162c0a470107a188fd65ec5dcd5d1798558c7afaa0310e064270cc4e -size 891376 +oid sha256:fc59036a775d3744b6595ee4add91c9a62a064c4ef15a0f53bcdb23c318ae9d5 +size 1009170 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png index 3d79691ef86beb058791c273079d98bbde9786fe..80957af62c53f81e98f48b938c358695283a8578 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d5b5660460af08aa6b7ebb02487cef10db299d56c5021355e339a7307648e84a -size 926969 +oid sha256:e2bbee33bf823d3f7a8fd916c9406feaf668e71bdb3d9b806d090026612065b6 +size 1152835 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png index 56e95b3aaa77c2f27aba5fc915ed723e806453e9..fa87914c4c00fdfbcd77fcfa817899ba434402a4 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6f153decd7ff20379dcbb46b04d890fe2dcce24e098d25c7c6fc4f4c6b4bf47 -size 897420 +oid sha256:5458f2ef0e85988bec850d810dea9274bda25eddaa0feef16a5cb496c17c91be +size 905937 diff --git 
a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png index fdf93c2e85163ac270fa5ed6dbfcb36afe21bd56..fe15c8d21c578db31b28bd30bd58cef7ae730d33 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:20d92360ce7380355a02edabb9366fd4198b2d9ab3c9cb6de7ead6e68ad1f125 -size 809341 +oid sha256:fe664863cf145aa3cc774cff8057cdce09c6aa6988c7cca401c17af7efe0ff6f +size 532257 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png index 6e07c12cbf1a91639ec89e61d1b348b4c9e51c61..77138883942ccfaa036f8e876953edb98d6c8915 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a52dd58403ae745fca3e45fda45fac9b4acbd83c38df34a9c019df537553957e -size 1976982 +oid sha256:818b75d0ccf9d2b825a66c118c30c7447f86b7868a4aa3bf996cf50655651211 +size 1978010 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png index 9be8a41bb1b51abf288bb66ecc375ac3ac3fc526..0be46c5f85d3b6797aa111b97ba8934a0a66990e 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c72715a3c4a044bb41142d1d9687f080390ae231d2f4c2f419e1a8459073c1d4 -size 812836 +oid sha256:0678b549e258919caea8bbf7260964739255e1e3a2bd8bcecc06da495f582eb4 +size 852543 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png index 056b431ebb5906e7ec174cd771874d38261bbb41..9d64e45200069a01549ed9446bba45e58762f952 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4567b90e40a556f01e45f4e2ce0f4c558c0211e22ca86dc374b4c21c0c0c623 -size 569063 +oid sha256:ef1f88cbbe92d814a6d8ddaf7a916cac9108f479e7d49601b2b7454951109415 +size 658514 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png index aacd53592535092bbd8f161f2e8dbef49a7fc3d3..9ec2033d6523028e08b10f6017f92b24ca59ceb2 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79e5489b11e68f8509c10cfd92c86dc884d4bdf35514be415ece3373f5554d11 -size 808510 +oid sha256:4a4d7bb6f7b4ae2387844ca922f503f2588e93098afd7088c673b0f9b8fb3741 +size 1070239 diff --git 
a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png index d554f0fb9c8fa6e2ac776bbc8f98a1d645feffdd..558b788e8eca9efb8d79609b0194b746a87dbb9c 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5497ca277344de55986909fb6e02114f992890962fd53503cdf4a472f091c5d7 -size 570187 +oid sha256:b3780dfc144dce86a75b82bbcb491ec159f4fb8cdf46030f99295e3ed49a3610 +size 593278 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png index e7654842328a8f534d6b3df0c3b7a86cfdb29cfb..7f7b73589932f32ee28c59181a45183c3e6de712 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aaa931e430c2522d381357a0c59ed2b58ebc54a55ebc31ce544d9a9398a7cf5c -size 814261 +oid sha256:2da3c95107ce90b84460d613c2a79c2c6198c363faf49a74efd23cf91a49a644 +size 546163 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png index 889d24acb21d5ca2726ac4e732357830e06c371e..b80a54b2eff846f8e03fde2f3a12f9f9497cceb7 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:00188a41a07b521306de08d658c9f902a686f255acdc13aac0cfc685845ea4b3 -size 968473 +oid sha256:de24653f3840c64d0745dc799b094ceb418a657a15eaef3ab2aeb10af5b8e42e +size 1180028 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png index 17c1c2ae97ca221d5a8591872177ce439a053c3d..e83becba7484dda2d5df1ef8983e07b4202b3979 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26f8ef81261f27559b35a810cdef999254d2b02db636532dc22d6dd09acb8f16 -size 905108 +oid sha256:c2e3ac3bc47e157f6a58b4d05bd4674e5da676b6fe91da0fa162765ae1859bef +size 626802 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png index f698a0f4cbbbd0ebae5c6973fb8d6c7760ff1aa0..512408f51caebce30cc768575143136c8ada2acc 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:876bbee8c385a1250410a0ea677959172e9339add5cf54522efc0a7c9605edff -size 929731 +oid sha256:e57027917d6822c2f0766fbf30f8fd3c2c7da5d1fdf21033351982e4545e95b0 +size 859518 diff --git 
a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png index 9f931fc2eceb50e17d4a3281255623b4dd6b09e3..c69785310ee8728ad6296982e4d6515e2245bce6 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e733bd7165552dab99798c4517ceff3d436fde8fc6efa2e50700c437ec06a287 -size 626169 +oid sha256:21e699861f58fcf103b8577c8ab323d05ca22ea63215bb61a5b6d5c5ddae232f +size 630428 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png index 1fbd6cf59ef6e7082bc8c34cebc13a8c17734889..51384243d3b690cad3ca587a3f295929b51ad285 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:47f600a7504ea7d78c66a160a34d365fcac2f4f77a20e99307e5140b88faa72d -size 878002 +oid sha256:5d493b2db57c753aa4eb291d4ce7f09c3e61d66ac346c53f22a7b8753908bfeb +size 1239067 diff --git a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png index d6c51805ec5bb63bef68287ad84218190239d485..cb1ac511e54277ce33e686ea295a48183b14f841 100644 --- a/images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png +++ b/images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:019bb8f9af33feb733a1d62ab89390348f8ca4800bd6783b2202acaee2ec7978 -size 621961 +oid sha256:351ad641f81f68cc03fcf3d67cc05e647c16198a21bff987471b186bc533fb06 +size 608860 diff --git a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png index 1b43976f071e418fb46ad567b2a1b9c2d219c79a..f8a14f74cd185a1473e217188231d33a134adc6f 100644 --- a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png +++ b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ba52d3eada55a53b37fae2d301d0bc8f537deb60007e6bc28050d0d35160b133 -size 3346118 +oid sha256:899a26219c8e24a57a923fd21046c341b162475caa3af9484ae4fc78c8eb31f3 +size 2452054 diff --git a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png index 6353bee821cf05b3f09e60229a21d12c5746df73..84dcad311e3a8019baa93b0fc7e65e7b89acb821 100644 --- a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png +++ b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3ea39f6cffd9c609de6622b26694b95d73abf6e6df5e4f72a31e056352d9b9b5 -size 1368490 +oid sha256:fa7f84e9f3e5e3f53fde5aa71b413fe2fafcd2b4fbfaab3dd1889a1130138a63 +size 1360255 diff --git 
a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png index 68f99203c702ceb8da1feabcd0430b76d18614ec..92563926ac237f716a84513de9a434c64febd2ea 100644 --- a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png +++ b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38a0d6f6bd5ed549326d357d91520f7a91bb0da72f377112dea35b599eba494d -size 2872145 +oid sha256:e8be89b73ecba3600487917f9ef5f663de3157263f71c7beeff5c3b4d3503391 +size 2445311 diff --git a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png index 2f35565e488f326162b315e58006daadeeaca693..62e9bd29e2cd837d1cceda16b2ebee90e8f0eeda 100644 --- a/images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png +++ b/images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:beb3fedb9dcc00f9f903cc6fb28f6e42290c41923190afa19daffe8d84f9a3b9 -size 1386233 +oid sha256:115cbcf1960150b3a7617d531367bb6db2473a0eccdf80818d5a8ba97cf5432d +size 1366333 diff --git a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png index 22bfc96925417cd088fd18184d9dcab3824bf4d8..b99e55f8b5189a32ea85e6c509da922f01c68059 100644 --- a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png +++ b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a75f24a78bfe64d8bc2acab28979a15e9a19ab6dffc6e5f21319cbc290b89e02 -size 3009367 +oid sha256:bc526b1f0a8b2df104335cb55e27edec8139d6155239ba295e72ca87e2f32b40 +size 2777231 diff --git a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png index 963dff3f18aa28c715e9d60255054d0949fd2c54..7ce57c66c07403c7f04b1441d6290e3efc28886c 100644 --- a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png +++ b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da8545d7971e0cd8b7faee8527faf41e8d776c473d314a6ba433849d2c3a195a -size 3913436 +oid sha256:e174ad27991875743b08870811ba9058e769c3805f703efac36a6199a7c460f9 +size 2259127 diff --git a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png index 24e791313766af509017a6d59a20bcb72b2d53c3..90128ea9ea0a9edc90f3752a3ca1631c6f934aeb 100644 --- a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png +++ b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4e86739e6ba65eb53fa3f4485e32c974a894fd380d5a74b58ed7c44ff142555e -size 1605053 +oid sha256:8e7a3c683eb0f87e1cca2f57b29857aaeb7d5a4bcdfdd3cc3c17ca8d4135d1a3 +size 1349205 diff --git 
a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png index 79eba9cc930702331909109de165c1e5d25d15aa..f4e9d9eb4856a960c28c0e57baf9bb7fb5e70e07 100644 --- a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png +++ b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be0c3d9d431d047d5e6f6420dab85cf7247ed9fcaf23d9c352fea12da19227cd -size 1243600 +oid sha256:e69a558b2bd4b91bcd4add892cb94f9685e5e275f228dae525cb730b512621ee +size 1229649 diff --git a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png index 2bab8b9254c08bd820fece22335093a0fa6d0807..c83d39d81d6cd5be1d3a78f3628af24e6cdb0cd1 100644 --- a/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png +++ b/images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0528af50927382fb5a26d8b7b9f5aac2ac23557c2d114946755606f5ea702e6c -size 3268257 +oid sha256:0b13ccf070d8d33f0e7d3de1767bd1b696f266f8c41d687181fbf641f6b30e76 +size 3587001 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png index f24b4769c59e39061a0c39b28fdeb2d036d37f25..6b3830746e35dfdd74048d05711351efe1114e60 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:67a9206e6c7e4dc9f949b81845ecd4c35882631f90fa01daebdf0c9109038a89 -size 602013 +oid sha256:f9fad25e24e27645290aedccc5c2ae78fcf2c87de75703a3a77d6fac8fab201d +size 1047842 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png index 666a543c26a095d743070c67c076b9f61af614c5..a28e98132f8f08386b42410dc91a329a97d00a9c 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c74af17760bb6ab08c1252748f9c6c2811b89e8fa6f24261b2154223c7bf4e35 -size 1112136 +oid sha256:3e5b39d9c2e39b08e119052d582d006e96fdfe90cb72544a3d49f66edee1840e +size 1331055 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png index 0fbaa89c5c05a519bc1b372fe33961519a1d708d..cae34b36c45c630155f52edd1365578374ec6ca9 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22fb773dc3eb3b4d3cd247276f94f7695282c43914ea871292cf26d1264c19da -size 550297 +oid sha256:e44f47c63606abbcb2efce38db67343e71d5b82a39f16a9b5ff92badff2a15bc +size 820234 diff --git 
a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png index 2073314d611c989fd5bd813b1378d1f0513dcf9d..ab5aa79d005d81a421bead2c26d2169c774aa17e 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4d4d2d7159b6925307c42e381905cbebe9e669227599f974576f78095fd04687 -size 642943 +oid sha256:490f622ceb0f37dd1a0b618d4831e5a1f248e0d5b02768f6675af7544a963af7 +size 1128566 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png index 989109b7eebe4ba3de9b29fb23d8d9f537cfc535..31eab5258944e6cbafab4577839da5ceac2536cd 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f539a8cf4b9d8869f55d2d496fe667735d1b6cd34c96146d746d8eb475dae166 -size 597454 +oid sha256:3c3f661a0bfd2749e16bd430d082f866b56e3460897394b80f401d4fc1c4102f +size 1130869 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png index 4a87e52e66bafdc12f63218d00520800cd1eb78c..e02ee3398a8608c19b461f62db4cf64e45678be2 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9895c6f14d4ec4be1f780c8f18b147337b9d3db72263e9e4bf7c1e8361a15267 -size 640647 +oid sha256:86544e3d12a3276db903aca6dfeeb2ed5f8256bdbd45602ac1501d39c75691f5 +size 628361 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png index dd82b983411ccb539a216e34cdcf8f4d9266bb68..9fbb9e371cf7a5a55d87f3e6435bad88b0038de4 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf78359a037073bda2282ec939a513a3aa32f999380a6bbf5bcea6e055dba736 -size 495810 +oid sha256:fb90dbb984a2674cca1fc503fc651532768097d28c5ba9c3eedb7afda2b3c0b9 +size 359991 diff --git a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png index 0ffb4590b6309cfdf585ae5efc00971365014525..1e78fa1a6eb31288ef28e314adf71c8859ea2d14 100644 --- a/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png +++ b/images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e106348c4bd6ffbec6dfe349b5303662ae5536d2ca84ba5ed99b9ba620783543 -size 617998 +oid sha256:6f2ec394e382abc2f69f2fb942c16baa6665d977c35c8c70cc3234c78e76ad6b +size 1375715 diff --git 
a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png index 1ab94074998e5724c12f59dfcdf0d0cf4784cd34..74a2a6179d46a739b036399dc77e86b6ba76c042 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:777e236098ba634553261f171779e94431c64481a702c54bd2f6c54889e8acdf -size 1169036 +oid sha256:23b16ac7891d744253527dc4bb8245429b928b5dc92ca2403be9b2d131eff975 +size 1258502 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png index 60ffed1510e441de191e5b426a8241c541a26651..2fbfd38b010e13f1b6ab0e246dead7d3f0f8053a 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ec5d8cf59b17ee9b959f25a27d29acee0e94ab64187391818475874275a45c3 -size 1783318 +oid sha256:bd6d7237833c561967573e92be43fb0b193e533d113b4699e02616f0227a7ee4 +size 1536777 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png index ae6aece37b9b1250f95c19b440ece64c1080fc9a..15aefe704d7fe4c156b59618361c447fb381774a 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fcaa8b7d4b1273e7b87938a0a9d6ee57653c0a496cde56cdc1da8b1ae74023e7 -size 1517917 +oid sha256:3f063f831b977f6d29a9524a9d095e8bfc28a52e140b57959815e2fefa85647f +size 2164755 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png index 9bbee556c2d30afbf4b5d3f876512f10ac8a482f..912d810ef1117dd2313b45a9488dc4b0c8a230e1 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:98ae3b9a48ff7f665e81a9e8911add395106d7fed9cffc574768250c6d2c409e -size 790791 +oid sha256:c56f682a3fe86e80eee3d66250b93b652a2d39e0aaeb478732be7a24d96d48fa +size 1034922 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png index 9e4eca245930fc5590a13d421d1ff8d198d7aa25..dc012b40ed5717d533a2f203507f5d4d145274d4 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f744158349e1c17a0f2e9a91e38ad369e2bde24117858a7592cbffae35a159c -size 791069 +oid sha256:cd1573b5a8d43cfedfe728c6360b4f5fe6aef0d3c63465c0e7f668c4195b5c96 +size 884845 diff --git 
a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png index 9156bfbe082bf72919ac0b662c244c4e82d1c121..87f75b7a2285cdfe940041ac3141f611496ab35e 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:18f9b6a54cb331e9d6fb63b18d64b6ea157b81d6c63452404b62e08d769e6b8c -size 764829 +oid sha256:8e0b88fdf95e8480d60981c66085b343f91efb345b2da619fdd8ab7c441aa83e +size 641417 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png index 3efebdcf348f733636b12a5243ac36bc6b1df72b..e00e05c3c8253ab0ca8ce24b57c19594b126c98f 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:51a0ce779a12d61edb2b2ef7cd995793407efe009251ec0d444c39aa4e13fadc -size 1092272 +oid sha256:9d229c4e07ad822ab586aa5c6f54bf20da8fa45d1d47d7e8d60c20f8653222de +size 1091937 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png index ed34b0e19ca6157bb1d1e862a15084c3ca75a9da..0c3013d73e656777320c5a5b53ebfc17be3b4da2 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:39d87b579d371e36e0c8337bccdf060735616e8689b2f9743d7fcbfa07551863 -size 1137412 +oid sha256:39dad6ca834d903290cc3cdc33abf591c4ba8cd7ef4eed8dac9d791586320383 +size 843290 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png index 4c070639c4f3f0b434aa8df2745368f4e32403d3..783b46af32318ce6de7092a0fe9b7a9d2e9ba12d 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:efb112f79867d8a71263cd9b33fe4a859522a63a7d6f60ef1df52235759a7d27 -size 1523126 +oid sha256:6483828cd587649f6f76d62960894df35980aa19e79a94ebd9bdfa4836f3692a +size 975860 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png index 3747e1a84318cc11f3c0f399c32b388bf7d00463..392fffd60cdbabc45d77891358c1130c519e5796 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9510acbd7e1e8c297397942647395e45dc8ce6058e4902d67febe96f221ec53c -size 791343 +oid sha256:a833402d4de66844f506e298ae52331149997e921192fb399c08f71066d7a89d +size 1042829 diff --git 
a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png index 8678959223f4dc154d830b03760aff7d35169820..eadb861da808cb94085bf9094b2ba768144bfda0 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:22e5a2f9ec56ad9d956fcf78baaa9fd4c2bf0ca49305967456b72f9b904a79cf -size 789476 +oid sha256:f96421dcebd3f1d01b02e8bae80adf34a3676c7406c93bce8f9e9dcb224649ee +size 1132591 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png index 682320b6a9afed0f71aa0c2f6c6d1e39bc9a6d11..47bcb45a16198f6f5e2a4dd3fb42e9f804124d82 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0b9f2a0b9439805f62b22f3ed9fba0505dfd153b7a702ce1668d0a180c77aeb3 -size 1486570 +oid sha256:482e467f9c0d7aa59797bab08acfca4fc136f1407575ea1a62466f98b67b2b1a +size 1912961 diff --git a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png index b4af012e5fc6ea494cba449b7cd9aaafaba40a34..5f4668af46bf20059ad9eba0f3680e646d78578a 100644 --- a/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png +++ b/images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:160f55112b61021aca508995dbfa3809ab954a3e7754f7efc22470dac8a44cc8 -size 1868859 +oid sha256:694c312eebb0be05849fa689fe01cb46feb0715cd3147cff50bd125ca6e2fb36 +size 1349388 diff --git a/images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png b/images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png index d816acfc1b405fd6e52226749657fecdd97f7e4d..1ce95d47ce70319c77448552b3c9582b01da7670 100644 --- a/images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png +++ b/images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f3a6548c1b794c4e8e68296332925d16bf149b63c5cdd05ad698ec8cfb4c7ee -size 1640013 +oid sha256:d9ae7fadda81c49ffdc15418d712c3e09da2d2e53debccc794be50b1295c62f1 +size 1459580 diff --git a/images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png b/images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png index 267ecdefe05ec31d791cb1ce38580537d1f9bfe2..5174b1cee302c6116fc6ce154c2fcefb10be0721 100644 --- a/images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png +++ b/images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:54ca89e56a2e055892459828133698f7b69445a71fa03f8ccc8e0ff7fcc0ba2f -size 1666911 +oid sha256:ec5275ea05afac0baf2b4d00eb0caa16d4fd27c0240ab99f8fca7c3be84c14c2 +size 1170003 diff --git 
a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png index dca76a35f93ffc4b46844e89c2d947d725218e21..7fc26a315d5c54c33b34577bc4d8a3826aa7b076 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f2b186a3f1ba90c08399560527489f74d4c6360af9a99945d989286846f42756 -size 539827 +oid sha256:a2c0531dee2f1ec1e1acff406173e648addcc76fb1a3c3db93fde65e5372a786 +size 781272 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png index f725eab2e9f0adbae2cde20d912fefbccfad1da0..325fe34c42341467063af2e86fc6f9ec6cdcbde0 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1f2adadd76352f71715d5eecc97a3ba060f602a96ea00f282ec7cdfd115896d8 -size 486431 +oid sha256:6d58321791f053160d409b2c2dc63a766f0572e94c9ce75eff207ec6009635fb +size 417598 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png index 0982790ab7eaf1df2c58e3ea666f6ff40a9b7544..f57a5035e0a814b0fbf1824a2d463809522b6883 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:136a5873da608af85e183de09f73c108cca8ea17c6e60bb36edf4bed49e1d3ec -size 652144 +oid sha256:05bf3403a8ad99165e8ac7816f6314306abc5d782915a2ce0f4c5daed690c272 +size 471209 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png index 0d82575becf8d463bdd43e6b2fa16b45074f8670..cf4cdd21cf193ffc943cc6d670e98d08fe1ed189 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3767e10423a026de87eeb27e808e8b2204e49066579e93edca5a4562b000b227 -size 497741 +oid sha256:0736d4c9d5202e646e521ec43800c193987ad51bd4d724ba18c9b32301bfe72c +size 770092 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png index 692492f4ae3b7c0cbdfb7dfb2d057a01e99e4dde..8cc63899c49717aaa21b3da7c53bbf2fb9390548 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccd329c452a6441c66ce82c7e604ed378f1f88193bdca2c1d7748ad8402f33ef -size 539903 +oid sha256:ceae1fa6dd281333312a28878ae8516d7e553ddc82e584e30571c52ef0d8b537 +size 700332 diff --git 
a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png index d720ed9af3595216ea402bbe1a18e8e7054ac7ca..dfb15f8c8b5f6f0349898d4f7366692b3cc9e30e 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3af89689e5441b124dd8c05432c073a26e84808e87e6d140f1e3a51afa4f0753 -size 449372 +oid sha256:308e2de4936fd5001f10fe073c4070faf50a8754b08e0d11ff98322c612a515d +size 672939 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png index 4ea1c8b44050c1f8260b3ab4b183891b3bcb159c..05c4f8babc35d16a49c6c04076ff56f4728a27a9 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f26908280676c6f90ac8c3252d0a27f53037c75c30a14283b66e079142b08c85 -size 338146 +oid sha256:f22a332ecc83ef5fcb7214941a6f2b5ab7333730b85bc001585a82e33e11017c +size 399948 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png index 1e90df35c53de6da6f1d4f57f6bd9a965fdcc48d..4e2f10707662a6a830f9b71b9fe0e722ecc8af80 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed63c9f662fe46525bb92d9be234b61e4620f8ed71118c0ed062667f4db2ef73 -size 345679 +oid sha256:a73703ef6e59577e56844920de1f5799167a08a1082d9354b872be9ee1d6afba +size 1532735 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png index 0a4e30de078236d2f0a1dde47ef07efd8783358b..5b5639b1151a27f6500f4df9cf47eb0cde287218 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e86f6d7efd6cb436116ba00dfc17b31c6c927adc119f30979529d30e25f6462d -size 518364 +oid sha256:ab9b795049e3d0e39b8b932ef7edeb3cd35794f63a6f70af115bd74ec137bea7 +size 651920 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png index ef7eddd22374df0e833814089ada96e860f2b806..ecace9dae6a96b0989c336ba9d98c68495599833 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85070747aaeef95f75ef2f52c13d774242e314ad1fbd0b2ea324e5de42e880bf -size 391458 +oid sha256:353237d33c1f0bad7ee099ba51d64135d0db197c2f620862097b43d67d530e5b +size 746762 diff --git 
a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png index 3907826d209326ed9aebea67857d4104454ed820..d37b0c43ed23101f0a9f91ce8909a700581afc3a 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16e93f9f38122766222733c307a4ed036f920bceda732602f211957068101b90 -size 332011 +oid sha256:61dd6e66b47a39b970161a3f4e4c5bec101a8d7f10b0d62ba39ada5fca87a4e8 +size 921394 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png index fe249ac7f18a8786f76bded97b82752e92257c13..6249f45991e63b8d7a9187ac4e4c81ed8804fda2 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:abdf701e25f1c261fe9f6682b95d6e46c41955cbd94f52fcc4af3ba13648c75f -size 346394 +oid sha256:034eba20777fc47f89f1399ca29db1be2d049b98fd3a67d20436524c3dcbfb9e +size 824218 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png index 91dea3fffa876160c3cd6725fb7732338f7a50fa..dcb9fe92c9b004cb133d762a40d59093975ddb93 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:29d6e717609fe0b5e050dae65f4dc1eb74e8a580d5abfb9ca84b50b175ddeb04 -size 539641 +oid sha256:97b1e78e9f1a389fa942d5d5c7d9e115baa02e7c1c252f0b94337cad9a3110ae +size 667836 diff --git a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png index e6cda7b473bb94b952fda9e047b900af20fd2fea..1475071663369de832d7af8f193462d79b99edc8 100644 --- a/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png +++ b/images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:83b2dee4bc29e6d1ca1f4eec5d2fce6587b726a9b8f381da4562e936b901c8bc -size 574982 +oid sha256:3d26c2d08bc43053c637707e4cf111542ec21b872c41462342211209e1ef1150 +size 781201 diff --git a/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png b/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png index 87daae786bd54bdf9d1a43d29d38a16c9aecb2bc..28335e7f9c95bb7d42b1c990470e4350bc35cc0d 100644 --- a/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png +++ b/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b70a60de5b186d159a992c71ada05ec252ed61d1056e52f868e6ab45623da166 -size 130048 +oid sha256:a033b4b829e42ff18de59ae8150a9c2e6ae22260499c419fa05188ac266bfc32 +size 172672 diff --git 
a/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png b/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png index f7f337310695ff8dfece7cf5c3dbf88bc2e539fe..5fc2b82e348b197e4c45ade44b8aa5d74199941f 100644 --- a/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png +++ b/images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6b335df6b1a781ccefdb5dde69c5feeef1fec222e5220aad4db76f71e59fcdb5 -size 100735 +oid sha256:681fa1e68bf363f2e219d1870cc06c532576a0979a5323d734718da29b1c132a +size 129423 diff --git a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png index b9a8cc73f1e057639d87171369f29f55130cd1ab..5dc510801b7eb34738cbcc51a5d73c93e68e6cdd 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:227dbaf3f4d1f1ea82adc9861655e0014796fbc9ec6fb7132bc8f6a6af11033b -size 1206293 +oid sha256:6cccfc4c59a301b776b363beb13c71d0c5752f323389becb25592378d863d898 +size 1927663 diff --git a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png index b7452f7fc80b4d7d4c067d557a12f7bbcf293529..88dcbfeff659698455a83b89bd1f12c7b862c006 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f260b885aaf48eb56bd5619fbefd66dbe2c14589abaacae4444b6389b73770bb -size 1205058 +oid sha256:5d46ed25adea406957b1f2f5677ca946e80ad3ca8b75b397885cefecbb18a7bb +size 1359256 diff --git a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png index 3b40dfa23001f7b775f7b1abd4e522a9bfa79b69..60c7bbb9377411ed39ec9a4ab9e16108b3a1b72b 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4f44e78d7fe558fb4da23a34fff66d70226ad62953d10738606df7c9c98721ae -size 950673 +oid sha256:487f422f71312cfa700ed07500a45739a1b0941a326622b85a705a7d3b27cc07 +size 1550840 diff --git a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png index 793d6d2dbf7f2990101d979a326592bea5a0aeb2..07c2c567d689086540d784b3a3515518229051f3 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:149503ed4c5b2532f8a91ca40ffbf22db3119095cccaeb9f54a94898f9c2552b -size 587111 +oid sha256:b672fe4c0f4fa8a48b429ff8df73de45f843506c8cd80139c67fbc5f7a23c8a7 +size 1405568 diff --git 
a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png index f276aacfde604133c279cc473b53a565eeb21918..6601cd25986a34b858144a9785fa3a99e6af6840 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e27d7226174f9bd41706ac937bfe8a978b59e911f945dbe7ef54d212a5a3798d -size 766834 +oid sha256:c90a1a90f99e078898ac6119840216ad231378e89325b580bdad66b618c732a0 +size 901645 diff --git a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png index 4a3f19ea21fd0ba13a49db4b500bbc2e3d33ef26..e716b6edb0e7487774011666d0f83b16c215af15 100644 --- a/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png +++ b/images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3969b15efc5aebef7086899a9b05d18b6a4339b2e9743160cca6b1eb3ee34f17 -size 635364 +oid sha256:a9587e3b2994ec72d9ea4593b8a3b90ea0afa93108bc13d5b118e4d9c8deeee0 +size 596166 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png index e4ec91426ffedab20d95e22f72dbd40060ab6e2f..463740481136b447c3e8230c6a5d4c7790ea3e02 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:190abc1fa90a86065bf57a51ac9f41d28b2a776faeb72548f3a7dfc134bf2d39 -size 455737 +oid sha256:fddd09aed61e7cb6e455bb001b709cf3bd0415d38baf80171ad8036b3292bd37 +size 180260 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png index 7c10f8f5e98c0cb3bb5deb81182f02044c66e45e..071e8fc3a67885657ecd6b15a751d68843738a79 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e11e7f65f9cb253663bd050367ddba4512542c91bdf0418d8460bd2b1777963 -size 472998 +oid sha256:02bc765b70111d45e2f1a129f2373296dfa07159aca05c1c4da6373bce06a661 +size 155899 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png index a30be0976cb1a742087be95694b8a3640ae21137..cf9555a863cd4a4da0add84bd0fcf6e1c836a937 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d2c741948b20fd390002dac55da3c224d7e54aac40d31c41de4a47160068c5b3 -size 449146 +oid sha256:f9b77ae03ab6f4c73e94747ba089033b061e156a504ff43f86f36437c0ad36c9 +size 186087 diff --git 
a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png index eb197ba9927c36676b135173ab0702724f9ad042..ce080886a4fa8eb4f49bc301a7780ec8c667e611 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8f8cee36d2b611e3912cfa88ca5b5634a0cc593296a5dcde8596e9783a0a5204 -size 445875 +oid sha256:691190b7034b3a96b7e2ed8cce2213780bce69daa2098d2728b08a48fc0079c2 +size 372666 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png index 24905a03208741f24b19354211586d85461c9862..ab9640b64e9e440cce79f9f9935291bcf9b06ef0 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b90849f1a118e4d096db4895a7e7112e728bcd1148dca0760e521d4a2fba88f7 -size 449917 +oid sha256:2630a39c461358f5f0a529f61486d17540db3f42e515c905098f3eb52f77ffe6 +size 374407 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png index 8115932b61a403dbbe48403810f8f50616bb9fe3..e53923534e64686f15010e522de4377da1da8242 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d1f1def591ef4dfba9305f721348970b8df3aba924c4d4c2fefd9be1d6a1ef9f -size 431072 +oid sha256:2628770e6e17062a2ef5590c9be94c6b68b30fd9d5d0615b15330811d025d58a +size 326445 diff --git a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png index 05b250ccae79b503aa2d63f0998523fad9a49945..8eba1f3bf2b3702df21c50848c7bc54af4137e40 100644 --- a/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png +++ b/images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb3aef9917de3372a0dcc3b6f21ae49c62185123c8d62fb1a843bb5ea773750f -size 470570 +oid sha256:aeae66ca09bc02d123a4ab43dd9f79738500a634068d7aac17b7f902f14dfeca +size 446028 diff --git a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png index a798e18159443eb62fc4ddeaf7687132c4a3d2b4..cfd5d145ef68458b7bd4d991e951893042636503 100644 --- a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png +++ b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:062d648bed7e9324cad54d7bbce85e6234f62b60b6f3386f2eb0c865341b273d -size 2150942 +oid sha256:cddd1f67a464f03b4aa4dc1b0a7c63e1ebd64fa9fb339a51693d53c2c0d21358 +size 804993 diff --git 
a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png index a55a8803e198be1d3235cfbcbf7b9a6292fe6414..0c21f280b6837ac1f9eef1d52ada1b54ab8d89fe 100644 --- a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png +++ b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:477586c1f09ea87a6b8d8968c170d24888fbbc69240f99df00a29eb8b363c6a9 -size 2128973 +oid sha256:3bbdb8738a1b5fa41ef90a32992325a1eec92d5301a6f43d39f5c68f9010acdb +size 1267702 diff --git a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png index dca484e3287348613ac8f6136b59d058b345af39..cb2e1a005bff90fc78ab6421241ab3de348420e3 100644 --- a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png +++ b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1b742bc1407d6d409f883e0e3e8d837aabf27cbe493ae96311ec14f2811e0f91 -size 2025047 +oid sha256:0619abe1832059843e3f7b161f7805f3bb22fd7a55a7e4a1d25f9a2332c0b62e +size 1717171 diff --git a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png index 99a761e8ca67e0ea8bc6953581e12459a0d0f15f..f53d0da7f842d04410617800e82b011b7d9a7c45 100644 --- a/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png +++ b/images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:79fd86b22e232cc2ce21339a59aa3637d79986b12c67717d00622819401c313f -size 1989484 +oid sha256:b48144f3e2a52332d9e71fe78841dc080607e3c70c646f18190d7fb4e7878eff +size 1249739 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png index f12e002f2f0d2750f2b5959d05cc44eda060f118..12245e76e02e734e1caedea0df480432ae573a77 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6676dff7f306c554a111e861b0888c4985c407bee26dda75297279311737da15 -size 2339507 +oid sha256:04d04d1653cc44110f236b13ff96bc08e10bff12d64893f3d4c303021f050a29 +size 1211954 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png index f21ddb87a8612ab2ef5151b372fcc3be516e91f3..12506ca66e36e24e953a8d5b708b3846b1f67f80 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1662214b6cdb2e399163984aef703e3482a06e52602a1abb83100bb4719ccaa7 -size 2291876 +oid sha256:b658e1456d39b28aeb4fc62c1fd71a988309dca65a8a36a8c1052d321db88dc7 +size 818957 diff --git 
a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png index fb5a78b0000069e12015dff22546319acd09133b..76d84e575b229f5a062d2d9d0e2e52880ddb22e2 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c21da1735aba1f5f6a93a15f7c6e8c89d542c54f9cdfd3f0bc090d0463fb1944 -size 966777 +oid sha256:9f2b5cda54be134f8c2a4ccf43987dec6870987cddc7c239e893d441b128a9ee +size 1031340 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png index 301da2413656a6ab442b1cff9419e3557544a4c8..0192507942d55ea97df779bc709cbfcc8fa19966 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:89b31b5a2ee7a8d53874dace47ed2b45795ff70e5da8d5216f1bbe55333acce7 -size 1521089 +oid sha256:e912d4442d6b152f0e266afb4cbbc8856912e9e0dbfd52f2145d807ea44b36cd +size 2034114 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png index 0032207ff81c35cbe58a64c606f6c6490006f233..d30783f3e1646d38224d6eeeafc275d6ff52fa2b 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f70da3996d2def7d8a86bfc1b2742431fac804fcff86ce4ec9c71045634c75bf -size 1691653 +oid sha256:c7b67fb404f06d53e33267aae212b90c3ca0fa20d76a091d9c789567768824ab +size 1741716 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png index 78df5c6806043bf2f95429b24a56d921d6e1dee2..ecd7d0e801a2696a691d01fc9c5356cd4bfc3bde 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c4803cd6651dbafadc1cab88d63dc377a0ef9141d4c568efe9b25871f0e18b9 -size 2154013 +oid sha256:a1e9f6a521e5a5e96cdfd80f0172f0b442d9a1236a1b7de1a1ffea5714bd89c5 +size 1954351 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png index 607302638f3e8454874e89afe87aff24621af9f9..0b2575b6f1cefee77441e1cd3b9a21a3875a0926 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3274ba0ddc6652c8956a265682ee9cb4166cf4b707e5ea30a0fc05f39d24dee -size 2331562 +oid sha256:6bb061e78b7d4004df89c820009e87a2871452c48ac88c78e5bbad1005f55c76 +size 1408597 diff --git 
a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png index 7d5fdc8f3beae0530932788a4b1d7508c0b26927..c2384c575edf5cc77e58e0bc73016682f6dad88d 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:848e288ba659a3e571f10b639a00fb0d4dfd230c15e3a051d2709b08002609c3 -size 2142604 +oid sha256:ceb425231998e6eec656eb51de33b979323f0ddf6921e3f58ca34dc51fe1c7cc +size 503840 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png index 2fe5c3038700d14cc4a85cd0b3ae0917efeadf1b..6d83a3c99caa6bb6bce0a694c73878bf1111329a 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5f71ddef33378ac7abd864583a1a85551aa4b984f95848b687172f6940ccf2ee -size 1911962 +oid sha256:f674b8426d87781212378b80f2df48ab8bb021d068cce3654f4184370a3ccae6 +size 1046757 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png index cae5631d63be158a9c1d2f3f012501e803ed38ca..f8ece2e431ab0402a8c49fbc69d50250c2235602 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6204717447faf226de706a2754c2f9d183aa1ed1abe6d2ba5a16aebd133e5c22 -size 1528236 +oid sha256:de7f64576fc4b2929f6c7296a0d2f3a9232d718b7fc6de0119239b01ef6e6927 +size 1086501 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png index 2d131568d4b2967eb1cc8f342045d80ea6da0db0..b1bedca7560fae4c0ac30eda4cd65eeac2759b05 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:645a6bfd91c325fac72b7b89cd346ba84907ce30f831e9b249cb9cf4e40c137e -size 1689238 +oid sha256:ed2998201d4d94c9e3a010ed6a3879ad125e87a1547658a2db17db4788aae6ba +size 1434490 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png index 669d0782f65c80a45c15cdcd445a4ce8736c109b..1442103ed25b85944a76e215e8566354afa1ca45 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7e286658fc08ea546fbd061d1ea05c4422fc7c9e12413697fa9677ba8469fa12 -size 1753299 +oid sha256:8e8e3821521f961a734600740ce5893feae90111ae36cd5aa6fafb2226330356 +size 1914887 diff --git 
a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png index deebdb58e0933338d5770512ee2fd50348d62e43..e6c41fbc990bc38e412ca8df057255f4b460b4fd 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:61cdec788e4565e6d96dfdeb8a974d162bff320f3cae85d8f0dfc49910e331d4 -size 1750833 +oid sha256:8a42b0f106f668cd9e153269ee813b716ed433d50b57f7864bb5a19bf0537e5d +size 2402746 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png index 59dc406bf623b5f5ffc3d1b8461ec18096551b73..1c05f143ab0103ec8dce009285937e79f9d297fe 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1ba7c70849a9745a00cd7e727628df34071869a7528594769b65ec13ca68c456 -size 1856453 +oid sha256:064de6b32c76232f1453180cc20f8ac9aa6c8a568be512d2b5a62d15a80c40f3 +size 2790572 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png index 4b7f8c3bff743a912f108bf8375057c7aa430910..39858f0b839dfc3192250f7938ddcbebcfbe9100 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a2cfe94786f10b8109c6c51ca4048345733e3c593c2bce1f6aa0e5bc63b84447 -size 2347569 +oid sha256:c9f74d5ef90a7e9a71d8fa32ef84bbc4e77e17fb9dceb2afd9f13510f90799be +size 1527148 diff --git a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png index c1afdd3990bc4836a2b66b23c7f9383d1ce46e7e..6a0dbe396622a5bc93c4f5f948b6a263d5d37132 100644 --- a/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png +++ b/images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f66b5f22979e19bceec86480d016649dd39460def90b5bda7a15e060de26303 -size 1990766 +oid sha256:30afa9df978c61945391776dd4c8f4683fe584e5f2930688047515d561fb24b1 +size 1304039 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png index ea7d905d6c553742f5082ad165fdf0e30f511893..de87e1a689aa9a44f5d6a08643fe04fc3aa12d35 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:71a38fcc810323689c0383605c6b954eebc56311eb68c547f49d845397bba789 -size 928589 +oid sha256:082c6d0c42f3cfdead166017a6902044159d27dc04d183ef46321f39f575b431 +size 744639 diff --git 
a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png index 1c6a44653d467001f603a425359b01bf4d9ff515..c3d4f678333c0171dbf956385a99724fc1149b2d 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb2ec63107fd0ec21768179dc3d697030bc2eae02d81bf632b85e6372e9707d8 -size 810867 +oid sha256:546e4f47bceb57de7c279f0a9074bdaf494a36865abed86340a5cdcd0f68a102 +size 813834 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png index 32d00e2f223629b2d98b0dd7006e80e6a0c0663d..efc8fef6d1701ce2d281a2aead96e43463ea380b 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:11e84b5538e37a270789084c5e549b917fe61e79c59a1faf12a8d5789da9539c -size 644639 +oid sha256:fd8c3c1dd57b8da3d73d2e4fd679958acf51325f51661972f93c22ba60360e3c +size 1250957 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png index fb98562ca418d602efc0376936fb21f6498c4e57..c32a808fcc48fae863b581c8db581934a860176d 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:38b550b198157c332d877b7fa48cb260587054dcb6b227cc7929808277255fa4 -size 703961 +oid sha256:dacec6de683c8582427d71a521ac826232a00fa1b4896aa58ca4f2b39312c3c8 +size 520903 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png index 6a212da9a0ef5433f1d3cba86bbc752336ebe84e..fcc3a04f06b03a639982cb3948e3a80309c9b503 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8afc0faa31d4f0e42433bccab33e66039880e52453bf288b2a5244375ceb0d13 -size 689286 +oid sha256:92e54abb325476b2914ea0e45316f4ba856db7432074eb8cf1a50f8756f7444b +size 715771 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png index d2f86dcb9d0ec0092abdab04eb20b9f90b8bb496..b069e313c7c6e8675eb71273a16d0b1cb4526824 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:843ff9e6c2f321a5d04385e094412e447777a9c1e60112872467f03e06ce31be -size 727017 +oid sha256:ec02ad69072848db82d3814c44ab1137f53b71072e8565aeeef934594e502bb3 +size 509495 diff --git 
a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png index e8e7da0c093efa72f95533e35341bb602153aae9..4bdb53f159be13b66fb66f5b8edb729e3c61dd33 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c2e8721537770068c489c8e29d62916c9c2777d134e28d1a4dbffc633629d2cb -size 462812 +oid sha256:e6a5b497e537bcef577012fbf68c35a290070e2412acdda5cafcb41cfc0350b5 +size 188519 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png index d174fdbae12c823a7cc3f6e9a5ca1180e9e0cc40..3e52033145b2e2408fdfa1e8f44667d355d8e6b8 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df050cb928ad44107086f7217d30a55f8f095773587b21d7ddc99460f46c92c7 -size 1112613 +oid sha256:526458f215123f4320a5e249c1f52d0d630fb52384c2a849cde3ac802bc4da85 +size 634493 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png index 10154b4c1bec6dd89016ac91a2afc80f423e8a1a..bbd2916e685303b10e07b3d1adf12220a60f6d2d 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:650bc377395d4341959bf8fb16dc972a95c0f1f8e036f3e2d0c495a010597489 -size 754479 +oid sha256:ba8565caddec5febe8f2771e8e97193bb7b096ac4958d0c07fbe2bfe0dfe9138 +size 398681 diff --git a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png index 56878eef14024cabafedaa60038f32d3445ce73b..49cb1bfbc36d27f9c2b487cf23af052045c7cc33 100644 --- a/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png +++ b/images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c1c9fb7b599f634a51ed629705c471c8c7acda076eaaccd6611698bf3e90c8a7 -size 591423 +oid sha256:1413545279ec840c34745f634cf13e82e37330ebd5d2389c705bd364c69029a8 +size 1092559 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png index 193f86309f59c80ca7f3da59de7877f6eb3991a0..14ba6d9deb56b05ad02e5ab9a9ee2aad0b06edff 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:57a56ab466b647cdabce97b92d0a8b562ada85a9f1433d064d36c179c559f357 -size 1388748 +oid sha256:a409c125d21b5133de7bfd3ed439e07da561b5e3dc34c3752c137bd350fa70c7 +size 1202120 diff --git 
a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png index 369c28b11fb2c147e339160c87a269c21ad112e2..c4f1a449cb63288ca114bd454698e55113fbf458 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d15cf4b6c3198db30a257a98f7f2f559ce4c54f71825ddb37df98c6b7a054acd -size 401206 +oid sha256:74fea36922d8f34e6c3dbc636224855c891b8548ba7fa05d9bf71159354f86ea +size 633362 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png index cab2c4d9a36b49ec57513ad045916531dc26608d..929111e38f07651e65c5d6bffed1aef447de707c 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:69860b298f08b7f0eb45481b60ee83cf6214d10ab615892ee41b63a57bfdb289 -size 2368577 +oid sha256:44e5cbc36ea39f6d0c5311faab17ddda914184953a87927cf676fe3d9f3a3489 +size 2319459 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png index 7ac5e50e3c8a783815d8698490de77bc8ce932f7..6bf1e9dd88e664259289d07cbc3b86f7d9da32c7 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0f58c4f33636b077a51473b3c2253c7e2ea5e30a07d3b42c248385683a5ded52 -size 644416 +oid sha256:94637e46c1e29888036cb00a75c3cc10675388705692a6fef3f1372e5a0a3e01 +size 417966 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png index cf8b9a41425a88fcc2eea1d0cb75ab0ef1d543b8..4731ee9820710ab504335a27a8c70e73fdb5aedc 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:aad8df5bdef49f0aed4bec62bfeb54ce751509e2e55629f62e80c2343d50c628 -size 325161 +oid sha256:279818bc224fcc505ef587abdc91d73646d9138f7b10c7411db2d27fdfec9417 +size 322028 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png index d76c7d6afb8b726c809fdf6bc42f6ab6a34fe32d..d00d80a51365015da6a3229d3e5e5ce71a9ddb3c 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb2854924e5f24c5a65c19285a0077175b66e984a453f6fbfc3d35b8b22c8528 -size 275361 +oid sha256:630033567aabe51e18b19f688dde277b353bd22cf3e02ad715a0338804e9379c +size 908453 diff --git 
a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png index 193647b9b7a878f092ee8d9d5c2c77302da5f902..de10dd253e054c1178bd440789d379063d590382 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b1c551ab382cc1cec8f9ce5b8e907bc3670f945dc5d2ca9f186bf710bcf5894 -size 283981 +oid sha256:770a8ca486a290d69f08fa0ec9109f8c16c38ffa5273fcfb86b40ff27ffed45e +size 286926 diff --git a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png index dc6fc19335f4faf6ac6fc0b3322565a93cb7409f..a67c13409b264e51b8192fc99477c44e142949d2 100644 --- a/images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png +++ b/images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eaf148a8305a4dcfcb919a5089f28972664ee47c1335342ff67f75ef0c391eee -size 320058 +oid sha256:19bc5fdc12ca4a0e4321897e5f31cfd9be1cbbca76e812ff13727635bce206b9 +size 1243377 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png index 895e0d90a6616b542b247a1cd7c9a0af1d6f1ff8..ae38ad061e24cd0a34de357d051e734909a5908c 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3724d2fec0441131ffa575704c08bc45eb149fe5c098db3e4873f144d0b6132b -size 694727 +oid sha256:02abc1dc7ad10d9fec5241032372bacb7c1bda8750abb813324e2a16332e5f01 +size 546169 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png index 43cd05e7cbc0f36f1f4dce7e27a2aa3f9be9edd8..b37293691ef45b2faa1fc2ea509843397176fd3b 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a6093e49a63e1203845f3cfe0198e5c71201100c9c9bc3ffc42556e4619d7c7f -size 602320 +oid sha256:737171cc5e35def77f53f3ff8bd63878d6655a84271bb7be3971545a4dec3e26 +size 228083 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png index 5bbfa245a031806931dbe208fffcd6d6d3523786..3363ab460797c06c075832abc97a01b170087055 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:01c7a55494e9a8421f81aadb8c5ef2f9bba7dc3a6a8834e9d2a5b727bd5604bb -size 699130 +oid sha256:f2ce82fa04b5f5fded37cd8df4423c0c81fe41a0c49c3924a77e845db6612690 +size 645360 diff --git 
a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png index e103326dd7ec19d5a7e2224f69246ef8d1534042..8e54f5a6dd997471cda1c98d6c53dc89da0e1d8d 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d289bade7c7f311c1fb8aa31b5b74e84c9dce0c5cd69347c0e57cd75e1d5864e -size 623860 +oid sha256:00837236348d6a4d4419bd505d3ff95a0c823e18578a00c631646b9f11b940fc +size 623900 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png index 27dcd51af3d468d0d7642e9aab546f55da02a4ca..8717fa0300c34d4acae4df1050c5bd79016fc5c4 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0e72dd51d3377ab6ae7695ff3fab17e018262ddb9d4603935b2b0c7ec81fc6d1 -size 1049845 +oid sha256:c08cff8d8f7b21e3cb622b59a8d31186aa32e7540c4d4fa6342c8230a4ad5ab0 +size 539548 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png index 20b861c5c6fa70b815c66672b1f878a78e7da0e1..993909abba2428212b51869c585156ca90d83ae5 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:80a8d921a596a0d880d9bef87c258a7063892059c75842e1a3c1381acccb7f11 -size 723334 +oid sha256:a94bd93340004fe38cf4724eb8739037f2745e12d4a05af7c794bc8c56c14bf3 +size 610795 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png index a59891c5fc5679e7d21606e34e54e3e4b7a979d2..e18bca278f4c1d598a70931619ab47ef2ca35cce 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3d47d0fd63e791032615a97f0c2c1bca1449a20192c045fb7159b85fea3bc717 -size 597901 +oid sha256:ccf53cc06c0c6708be47303a22ff173d0b0ff3fabf7cf08c725b63fd10131b5e +size 185928 diff --git a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png index 35f337a0fe9eec28c9d76f07481a77a5dbf49bf1..a68add2b7b47575d111d748d29de51ca004d5afc 100644 --- a/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png +++ b/images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:07269cfb9344e3616d26de435bfeadff45fe2a8d76b22317ffd3b82e7bd1acb1 -size 616255 +oid sha256:75aa37d9021bc477ed872149e1e63e7d7857c0d3d99b7365a9f28a6c0d2b326f +size 598945 diff --git 
a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png index e9ef3a4bea087ac6af7fb73a6bc9e502e0ab871f..af0c50ee9a08e4ffa5b774f64701a88905b046bc 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b213c30b53b4ac523788d37bd9a9b045f32ee99d8b1d6b051f0ac27f9242aaa9 -size 737321 +oid sha256:5b2da1cd1721502a947509eb12a73c9cfc95e41440d3f701970358013be692fa +size 737583 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png index f1b6257cd1a76809edc97ce9c2d6d51cd1106a34..2363b2af972da987a0acc2dbec5db9dd33f65548 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad71f23f678378227cf9c54aabd2c709ed20a28c60f42a2a1ba7fa1a1a6dd960 -size 790696 +oid sha256:7eff28f74acbb51bad3cc049ca481a1c310a4d686de6fcb15abe440c4a630550 +size 560141 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png index 7ac49d27742cc668434dc69c7fde1529e0ebb8b6..028e651ac2f19073417ead66a734003556e1e349 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d6f5c5c7b7fbaed7d579a13e2acfb7f35dc3973c95e7c8ace5fa0c8d73327c8d -size 2693682 +oid sha256:35dec9bc819ed894c837dfec51be934b3c433d2f0b830a7a375869c433efe6d3 +size 680611 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png index 5035c4a4b8611f40764283a6e8722af3429f42b0..f5f4d67425c708aba16bcaa31e78c0d81bde5c46 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fbfd8e71f789e8cb9ce8c711ca35dba8b073e0ed08e19530fa6eeabc5818a430 -size 758361 +oid sha256:9ec47a77c030a748cadbae00eda5512de2fb463be873c6223a3f6b7109ea797d +size 1168467 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png index c90ac6a886acc71b06261631a4b8fd54f8ff0395..7d9ea8ee30a1b8ccf0418887b45cb0b5493e509e 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21bf568112e31dffe0a183e2c257ae65a7ed2477a42dcb7176a1f96cb8e1d99b -size 765876 +oid sha256:0ee8c39b552150308678c92cbf7f897de978f14e01cb4bef58d64b388c49cf48 +size 976203 diff --git 
a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png index 3cd02e23c67397209d4784e2d432e5d5886fe5d2..ce91accd6f0e65dffd41564fb7b1a8db36f2e119 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c5d9d475699dfc79d221dcc3708e5e1fdb7aa4ee4b84c3e261d62cb9c34bc95d -size 1437948 +oid sha256:22d6ac89a41f5b294a195218b724401af6f7cd01990bfd6637d54cffdb5cc9a9 +size 1579868 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png index 18da0df6dd65bd768c38c7c6d6d7887af14be528..a0266f50a813329f35ce4965590e00bf4a4f4228 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:605e68ff539c04b88bb276b18c1226376d393c4456cdb3953d021c0a16f53bd5 -size 770107 +oid sha256:177b824230e6faf26b5fd3b11655cf14d55fdfb9dcef7c68fb459bf628016b5f +size 871877 diff --git a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png index d2fb5a0b32840ac864d2eb2ce653fa4b0178155c..dd17483e83ac94141ab8016ab4b7276b59fdc1a1 100644 --- a/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png +++ b/images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5b8d919cc03f78229942bc8b1f56dfc48013c3ebb7eadc4906ef9ac7b06ee551 -size 592821 +oid sha256:74b53fb49bd967427288c074cd91f8434a4117d96a79282e183f1ed8bece6ba3 +size 486234 diff --git a/mind2web_train_llava.json b/mind2web_train_llava.json index f66473e97dff7bf10bb6f5ac5c4a2ba357002006..771a380577ecfc4b5b986ff2385b58bdd4ca31a8 100644 --- a/mind2web_train_llava.json +++ b/mind2web_train_llava.json @@ -1 +1 @@ -[{"id": "mind2web_0", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_ee0ac3c1-985e-43a9-915e-f489f926b0a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.342, 0.651, 0.374] in the image\nAnd my action is Action: TYPE\nValue: 1648926800"}]}, {"id": "mind2web_1", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.012, 0.546, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.388, 0.952, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 1.985, 0.294, 2.004] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.122, 0.697, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.436, 0.958, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.124, 0.489, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\n[tab] Alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.244, 0.392, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_8", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.086, 0.327, 0.114] in the image\nAnd my action is Action: TYPE\nValue: Edinburgh"}]}, {"id": "mind2web_9", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.204, 0.664, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_10", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.0, 0.44, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_11", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.022, 0.602, 0.035] in the image\nAnd my action is Action: TYPE\nValue: Camarillo"}]}, {"id": "mind2web_12", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.27, 0.394, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_13", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.203, 0.359, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_14", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK\n[button] No thanks -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 0.875, 0.953, 0.894] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_15", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.112, 0.949, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_16", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 10013"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 1.107, 0.366, 1.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_17", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK\n[button] Search flights -> CLICK\n[span] Refundable fares -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.181, 0.458, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_18", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_b76c6f62-43c7-465e-98ff-39332220d881.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.069, 0.094, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_19", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_d53a6991-303f-429e-a864-ef07350fe423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.316, 0.161, 0.345] in the image\nAnd my action is Action: SELECT\nValue: Daytime Only Parking"}]}, {"id": "mind2web_20", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_3f0533c7-e459-49b9-801c-2da4e0a3b04d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK\n[button] Today, Tue Apr 11 -> CLICK\n[span] Tomorrow, Wed Apr 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.272, 0.578, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_21", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.167, 0.623, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_22", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.215, 0.715, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_23", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK\n[link] Developer/ Senior Developer, IT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.22, 0.349, 0.241] in the image\nAnd my action is Action: TYPE\nValue: 8"}]}, {"id": "mind2web_24", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Month -> CLICK\n[div] Add guests -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.213, 0.793, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_25", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.439, 0.245, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_26", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.254, 0.185, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_27", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.009, 0.418, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_28", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $14.45/Day$13.95/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 1.527, 0.777, 1.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_29", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenville, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.028, 0.335, 0.044] in the image\nAnd my action is Action: TYPE\nValue: McDonalds"}]}, {"id": "mind2web_30", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.217, 0.873, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_31", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Pizza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.09, 0.244, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_32", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.082, 0.594, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_33", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 45201"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.208, 0.514, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_34", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.283, 0.903, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_35", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.555, 0.393, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_36", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indonesia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds preferred, including breakfast.\nPrevious actions:\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 1.324, 0.089, 1.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_37", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[heading] SOCCER -> CLICK\n[a] FEATURED MATCHES -> CLICK\n[link] ENGLISH PREMIER LEAGUE -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.3, 0.454, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_38", "image": {"bytes": "", "path": "./images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow the Denver Nuggets NBA team.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.092, 0.335, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_39", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.139, 0.292, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_40", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.297, 0.96, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_41", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.199, 0.485, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_42", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.021] in the image\nAnd my action is Action: TYPE\nValue: Sam Harris"}]}, {"id": "mind2web_43", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.194, 0.376, 0.215] in the image\nAnd my action is Action: TYPE\nValue: Carla"}]}, {"id": "mind2web_44", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK\n[link] $25 to $50 (2,237) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.287, 0.974, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_45", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.162, 0.575, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_46", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Paperback -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.471, 0.428, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_47", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_48", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sort by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.512, 0.52, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_49", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.386, 0.481, 0.473, 0.509] in the image\nAnd my action is Action: TYPE\nValue: AZXA46"}]}, {"id": "mind2web_50", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. 
The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.395, 0.711, 0.425] in the image\nAnd my action is Action: TYPE\nValue: 234567890"}]}, {"id": "mind2web_51", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.041, 0.917, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_52", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.178, 0.463, 0.184] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_53", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.005, 0.789, 0.02] in the image\nAnd my action is Action: TYPE\nValue: Seattle, WA"}]}, {"id": "mind2web_54", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.354, 0.28, 0.39] in the image\nAnd my action is Action: TYPE\nValue: colombo"}]}, {"id": "mind2web_55", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.442, 0.096, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_56", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.233, 0.153, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_57", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.017, 0.428, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_58", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0ac88699-988e-4a0d-b0bd-1e73d8eb268e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01\n[combobox] Year -> SELECT: 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.465, 0.609, 0.498] in the image\nAnd my action is Action: TYPE\nValue: 123"}]}, {"id": "mind2web_59", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK\n[textbox] License Plate -> TYPE: AZXA46"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.476, 0.565, 0.514] in the image\nAnd my action is Action: SELECT\nValue: AZ"}]}, {"id": "mind2web_60", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.173, 0.354, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_61", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.003, 0.561, 0.014] in the image\nAnd my action is Action: TYPE\nValue: Ricky Kej"}]}, {"id": "mind2web_62", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.208, 0.754, 0.223] in the image\nAnd my action is Action: TYPE\nValue: Venice Beach"}]}, {"id": "mind2web_63", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.897, 0.23, 0.936] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_64", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.051, 0.712, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_65", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.477, 0.595, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_66", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.4, 0.324, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_67", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.402, 0.693, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_68", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK\n[combobox] CabinTravelers with Economy (first checked bag cha... 
-> SELECT: Business or First"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.364, 0.238, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_69", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.031, 0.343, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_70", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.106, 0.848, 0.136] in the image\nAnd my action is Action: SELECT\nValue: Walking"}]}, {"id": "mind2web_71", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.448, 0.986, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_72", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.553, 0.287, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_73", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living History event to attend in in April .\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_74", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Highest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.581, 0.33, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_75", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.242, 0.259, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_76", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... 
-> CLICK\n[button] Check-in April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.293, 0.473, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_77", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.02, 0.183, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_78", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.265, 0.175, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_79", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.153, 0.546, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_80", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Air-conditioned -> CLICK\n[div] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.113, 0.765, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_81", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.478, 0.309, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_82", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.205, 0.406, 0.248] in the image\nAnd my action is Action: TYPE\nValue: pedicure salon"}]}, {"id": "mind2web_83", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.026, 0.456, 0.058] in the image\nAnd my action is Action: TYPE\nValue: vegetarian"}]}, {"id": "mind2web_84", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.202, 0.56, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_85", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.009, 0.651, 0.039] in the image\nAnd my action is Action: TYPE\nValue: Google Play"}]}, {"id": "mind2web_86", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.026, 0.564, 0.042] in the image\nAnd my action is Action: TYPE\nValue: 94115"}]}, {"id": "mind2web_87", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.396, 0.622, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_88", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.19, 0.257, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_89", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. 
The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.645, 0.355, 0.657] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_90", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.62, 0.296, 0.655] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_91", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.615, 0.233, 0.647] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_92", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.089, 0.478, 0.111] in the image\nAnd my action is Action: TYPE\nValue: kashi vishwanath temple"}]}, {"id": "mind2web_93", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.173, 0.963, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_94", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.803, 0.2, 0.814] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_95", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK\n[label] 4 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.567, 0.652, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_96", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.264, 0.539, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_97", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_afa1433c-8d13-4e0d-9b05-e0d7299da884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_98", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.089, 0.263, 1.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_99", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Highest Rated -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.716, 0.098, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_100", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.197, 0.237, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_101", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.225, 0.436, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_102", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_4b8526ca-6237-4769-b1de-06e1097f8783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.317, 0.474, 0.347] in the image\nAnd my action is Action: TYPE\nValue: grand central"}]}, {"id": "mind2web_103", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.319, 0.263, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_104", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 1.106, 0.296, 1.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_105", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.03] in the image\nAnd my action is Action: TYPE\nValue: Ariana Grande"}]}, {"id": "mind2web_106", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.427, 0.709, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_107", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_51845b7f-fb7b-4bc3-9c13-2c0a2afb5e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK\n[link] Order Now -> CLICK\n[button] Help Me Choose Solar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.448, 0.838, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_108", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.411, 0.192, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_109", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... -> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.397, 0.352, 0.418] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_110", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.83, 0.192, 0.845] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_111", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.163, 0.868, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_112", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.13, 0.079, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_113", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor\n[span] easter home decor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.27, 0.986, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_114", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.666, 0.522, 0.674] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_115", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.065, 0.326, 0.104] in the image\nAnd my action is Action: TYPE\nValue: music"}]}, {"id": "mind2web_116", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.07, 0.727, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_117", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.221, 0.84, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_118", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_119", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.163, 0.928, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_120", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.397, 0.349, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_121", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.816, 0.588, 0.824] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_122", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors\n[textarea] -> TYPE: To Watch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.527, 0.609, 0.55] in the image\nAnd my action is Action: SELECT\nValue: People"}]}, {"id": "mind2web_123", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.202, 0.748, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_124", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK\n[checkbox] Jeep \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 1.735, 0.408, 1.742] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_125", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... 
-> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.142, 0.893, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_126", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.314, 0.529, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_127", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.58, 0.32, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_128", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK\n[input] -> CLICK\n[option] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.665, 0.906, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_129", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.253, 0.687, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_130", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 2.97, 0.107, 2.978] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_131", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK\n[searchbox] Refine Location -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.188, 0.12, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_132", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.375, 0.686, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_133", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK\n[textbox] First Name (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Buck"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.272, 0.833, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_134", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_43d69022-fad5-4117-b3a0-98489b7889a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_135", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_af1b2580-5ce6-48af-a29f-8d9152414487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50\n[input] -> TYPE: 100\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.927, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_136", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.043, 0.869, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_137", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Avatar The Way of Water\n[div] Avatar: The Way of Water -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.514, 0.632, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_138", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK\n[button] Jul 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.242, 0.571, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_139", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.193, 0.78, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_140", "image": {"bytes": "", "path": "./images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of countries with the highest number of seen aircrafts.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.055, 0.427, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_141", "image": {"bytes": "", "path": "./images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find details on converting points to miles.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 2.629, 0.131, 2.643] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_142", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.403, 0.079, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_143", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.12, 0.347, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_144", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.124, 0.216, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_145", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_35e66ac1-e253-4232-849d-9b68d27b76b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.532, 0.7, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_146", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: Lubbock, Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.09, 0.871, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_147", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.169, 0.363, 0.186] in the image\nAnd my action is Action: TYPE\nValue: Timesqure New York"}]}, {"id": "mind2web_148", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.186, 0.12, 0.224, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_149", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK\n[button] Member Rate Prepay Non-refundable -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 1.082, 0.96, 1.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_150", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.439, 0.27, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_151", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.28, 0.407, 0.309] in the image\nAnd my action is Action: SELECT\nValue: Compass"}]}, {"id": "mind2web_152", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.456, 0.793, 0.476] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_153", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.038, 0.361, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_154", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.306, 0.804, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_155", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. 
Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK\n[gridcell] Tue Apr 18 2023 -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.157, 0.484, 0.183] in the image\nAnd my action is Action: SELECT\nValue: Shuttle Time"}]}, {"id": "mind2web_156", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.423, 0.648, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_157", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.523, 0.526, 0.568] in the image\nAnd my action is Action: SELECT\nValue: 8 15 PM"}]}, {"id": "mind2web_158", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.359, 0.593, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_159", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.326, 0.459, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_160", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 1.229, 0.08, 1.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_161", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.461, 0.834, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_162", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.079, 0.44, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_163", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.19, 0.473, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_164", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Guardians of the Galaxy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.148, 0.463, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_165", "image": {"bytes": "", "path": "./images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me movies produced by Aaron Horvath.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Aaron Horvath"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.037, 0.657, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_166", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.05, 0.619, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_167", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.256, 0.297, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_168", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.029, 0.553, 0.052] in the image\nAnd my action is Action: TYPE\nValue: wall art"}]}, {"id": "mind2web_169", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.234, 0.559, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_170", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK\n[span] Price per person -> CLICK\n[p] Cheapest first -> CLICK\n[div] Economy Light -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.441, 0.796, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_171", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.64, 0.977, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_172", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.131, 0.93, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_173", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM\n[div] Search -> CLICK\n[div] Premium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.753, 0.075, 0.927, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_174", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.405, 0.273, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_175", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.778, 0.22, 0.806, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_176", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.095, 0.266, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_177", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.162, 0.5, 0.187] in the image\nAnd my action is Action: TYPE\nValue: Roanoke"}]}, {"id": "mind2web_178", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.592, 0.105, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_179", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.213, 0.997, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_180", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.083, 0.628, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_181", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.172, 0.447, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_182", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.344, 0.089, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_183", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: TYPE\nValue: mexico"}]}, {"id": "mind2web_184", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.245, 0.506, 0.287] in the image\nAnd my action is Action: TYPE\nValue: Aruba"}]}, {"id": "mind2web_185", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.203, 0.652, 0.256] in the image\nAnd my action is Action: TYPE\nValue: heathrow"}]}, {"id": "mind2web_186", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.413, 0.393, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_187", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK\n[checkbox] Office location Office location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.541, 0.684, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_188", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_2b1af735-6002-4a1b-a021-dd9d263f3e1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.054, 0.617, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_189", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.277, 0.341, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_190", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.037] in the image\nAnd my action is Action: TYPE\nValue: xbox wireless controller"}]}, {"id": "mind2web_191", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.709, 0.193, 0.733] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_192", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.208, 0.171, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_193", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.253, 0.154, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_194", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.368, 0.536, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_195", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.041, 0.282, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_196", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.255, 0.84, 0.287] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_197", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.865, 0.562, 0.875] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_198", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fare calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.217, 0.46, 0.243] in the image\nAnd my action is Action: TYPE\nValue: brain"}]}, {"id": "mind2web_199", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK\n[label] Up to 1 hour -> CLICK\n[label] 1 to 4 hours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.827, 0.236, 0.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_200", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.064, 0.455, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_201", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same place at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.194, 0.174, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_202", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Milano (Milan) -> CLICK\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.262, 0.16, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_203", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.016, 0.546, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_204", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April 2nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.565, 0.881, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_205", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK\n[button] Stats -> CLICK\n[link] Season Leaders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.308, 0.174, 0.325] in the image\nAnd my action is Action: SELECT\nValue: 2020-21 Regular Season"}]}, {"id": "mind2web_206", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. -> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.195, 0.962, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_207", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.069, 0.327, 0.091] in the image\nAnd my action is Action: TYPE\nValue: PARIS"}]}, {"id": "mind2web_208", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER\n[link] POINTS SHOP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.487, 0.141, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_209", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.073, 0.127, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_210", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> TYPE: 12345678\n[combobox] Date -> SELECT: Friday, April 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.435, 0.875, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_211", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.143, 0.277, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_212", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.302, 0.463, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_213", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.264, 0.539, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_214", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.856, 0.171, 0.974, 0.205] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_215", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM\n[combobox] End Time -> SELECT: 8:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.312, 0.484, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_216", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.344, 0.253, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_217", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offer free shipping.\nPrevious actions:\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400\n[textbox] Maximum Value in $ -> TYPE: 500\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.772, 0.027, 1.782] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_218", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.455, 0.29, 0.476] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_219", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.251, 0.481, 0.285] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_220", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.005, 0.492, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_221", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.07, 0.468, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_222", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.241, 0.277, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_223", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.249, 0.326, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_224", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Minimize my -> SELECT: Walking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.476, 0.848, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_225", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.26, 0.312, 0.283] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_226", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.204, 0.963, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_227", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.334, 0.93, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_228", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is 2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.164, 0.477, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_229", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Annual Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 1.134, 0.95, 1.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_230", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.01, 0.62, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_231", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: professional boxing\n[option] Professional Boxing -> CLICK\n[link] CES Boxing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.264, 0.941, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_232", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.938, 0.226, 0.946] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_233", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.098, 0.757, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_234", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.059, 0.589, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_235", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.353, 0.567, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_236", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.099, 0.265, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_237", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles \n[div] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.071, 0.398, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_238", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.496, 0.039] in the image\nAnd my action is Action: TYPE\nValue: Western Digital internal SSD 1 TB"}]}, {"id": "mind2web_239", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.23, 0.487, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_240", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.639, 0.879, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_241", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith\n[textbox] Email Address * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone Number * -> TYPE: 6157075521"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.298, 0.557, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_242", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Phuket\n[generic] Thailand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.195, 0.686, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_243", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.139, 0.341, 0.153] in the image\nAnd my action is Action: TYPE\nValue: GOA"}]}, {"id": "mind2web_244", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.236, 0.742, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_245", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_6f68971e-2d54-4d7a-bd20-dc93e6b5b1fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.266, 0.341, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_246", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.543, 0.566, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_247", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.038, 0.228, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_248", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.458, 0.263, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_249", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[button] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 1.737, 0.651, 1.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_250", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.21, 0.485, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_251", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] FISHING REELS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.426, 0.178, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_252", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.407, 0.72, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_253", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.01, 0.39, 0.03] in the image\nAnd my action is Action: TYPE\nValue: projectors"}]}, {"id": "mind2web_254", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.351, 0.271, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_255", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_256", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.196, 0.257, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_257", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.04, 0.082, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_258", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.262, 0.395, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_259", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_260", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.24, 0.568, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_261", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.129, 0.504, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_262", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.32, 0.277, 0.343] in the image\nAnd my action is Action: SELECT\nValue: 2018"}]}, {"id": "mind2web_263", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 1.858, 0.158, 1.869] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_264", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.282, 0.795, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_265", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. 
-> CLICK\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.015, 0.189, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_266", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.143, 0.369, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_267", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.159, 0.568, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_268", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9a73b2c1-062d-4f9b-9d4d-af9d4c0abc95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.608, 0.526, 0.654] in the image\nAnd my action is Action: SELECT\nValue: 10 guests"}]}, {"id": "mind2web_269", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.336, 0.5, 0.373] in the image\nAnd my action is Action: TYPE\nValue: accra"}]}, {"id": "mind2web_270", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.384, 0.045, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_271", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.132, 0.965, 0.158] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_272", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.441, 0.263, 0.485] in the image\nAnd my action is Action: TYPE\nValue: SHANGHAI"}]}, {"id": "mind2web_273", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[link] Search open jobs -> CLICK\n[input] -> TYPE: TX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.295, 0.884, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_274", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.21, 0.345, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_275", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.119, 0.777, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_276", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.942, 0.31, 0.953] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_277", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: CDG"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.564, 0.393, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_278", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK\n[button] Filter -> CLICK\n[combobox] Location -> SELECT: Arizona"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.297, 0.512, 0.317] in the image\nAnd my action is Action: SELECT\nValue: Date - latest to soonest"}]}, {"id": "mind2web_279", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK\n[radio] Key extraction -> CLICK\n[radio] Vehicle -> CLICK\n[generic] 2 filters Key extraction \u2022 Vehicle Clear all Cance... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.117, 0.66, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_280", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: Panini Diamonds Kings Baseball cards\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.239, 0.179, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_281", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.113, 0.371, 0.125] in the image\nAnd my action is Action: TYPE\nValue: ac recharge"}]}, {"id": "mind2web_282", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.529, 0.418, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_283", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.44, 0.374, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_284", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High\n[checkbox] 4+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.233, 0.914, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_285", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.849, 0.627, 0.865] in the image\nAnd my action is Action: TYPE\nValue: Wolowitz"}]}, {"id": "mind2web_286", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.43, 0.609, 0.464] in the image\nAnd my action is Action: SELECT\nValue: 01"}]}, {"id": "mind2web_287", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.423, 0.281, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_288", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.301, 0.459, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_289", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000\n[input] -> CLICK\n[input] -> TYPE: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.464, 0.37, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_290", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.359, 0.647, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_291", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.008, 0.519, 0.013] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_292", "image": {"bytes": "", "path": "./images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up Popular Photos in the Community featuring Aegean Airlines\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.055, 0.742, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_293", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.498, 0.178, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_294", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.03, 0.429, 0.045] in the image\nAnd my action is Action: TYPE\nValue: LOS ANGELES"}]}, {"id": "mind2web_295", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.118, 1.0, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_296", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.308, 0.646, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_297", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.441, 0.268, 0.485] in the image\nAnd my action is Action: TYPE\nValue: KATHMANDU"}]}, {"id": "mind2web_298", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK\n[button] Get tickets -> CLICK\n[listbox] Select quantity: General Admission price: $60.00 -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.51, 0.573, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_299", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.008, 0.323, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_300", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.438, 0.393, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_301", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.281, 0.279, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_302", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.29, 0.477, 0.306] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_303", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.185, 0.573, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_304", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.228, 0.341, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_305", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_4ba0fa22-4b33-464a-ba78-506bbb581b2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: Pizza\n[b] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.22, 0.562, 0.231] in the image\nAnd my action is Action: SELECT\nValue: 6 Guests"}]}, {"id": "mind2web_306", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[svg] -> CLICK\n[button] Choose your room -> CLICK\n[button] Book Business Double Room A -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.896, 0.135, 0.977, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_307", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_3b6a87ff-2811-4fa2-b5c8-e84a06e9231a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: SE4"}]}, {"id": "mind2web_308", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... -> TYPE: 12345678"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.255, 0.499, 0.28] in the image\nAnd my action is Action: TYPE\nValue: Jason"}]}, {"id": "mind2web_309", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.613, 0.492, 0.628] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_310", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_e21752a7-a515-41b7-9ac0-e1cb6394a9ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.095, 0.549, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_311", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles\n[b] Los Angeles -> CLICK\n[button] Next -> CLICK\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.34, 0.122, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_312", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.421, 0.824, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_313", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.848, 0.285, 0.88] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_314", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.264, 0.568, 0.286] in the image\nAnd my action is Action: TYPE\nValue: Alfred"}]}, {"id": "mind2web_315", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.17, 0.963, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_316", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.145, 0.712, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_317", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK\n[radio] Minivans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.568, 0.821, 0.595] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_318", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.142, 0.109, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_319", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK\n[textbox] Search Kroger... -> TYPE: bananas\n[span] bananas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.124, 0.969, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_320", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.368, 0.472, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_321", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.281, 0.981, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_322", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.331, 0.303, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_323", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.258, 0.588, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_324", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.302, 0.299, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_325", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.295, 0.664, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_326", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_d6daa669-ddfa-48d3-90e7-2838239b4926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[button] View Cart. Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK\n[p] Choose 3-hour window -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.433, 0.754, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_327", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 1.035, 0.284, 1.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_328", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.392, 0.164, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_329", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK\n[link] Outdoor -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.564, 0.078, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_330", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar\n[button] CONTINUE -> CLICK\n[radio] Daily summary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.425, 0.588, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_331", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK\n[link] Fastbreak Program -> CLICK\n[link] Join Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.927, 0.783, 0.949] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_332", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.258, 0.187, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_333", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.389, 0.572, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_334", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.101, 0.39, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_335", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.038, 0.917, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_336", "image": {"bytes": "", "path": "./images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an article about a new restaurant and share it on Twitter\nPrevious actions:\n[link] New Openings -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 1.04, 0.056, 1.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_337", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK\n[button] Adults-Only -> CLICK\n[button] Airfare Included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.48, 0.772, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_338", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.268, 0.34, 0.289] in the image\nAnd my action is Action: TYPE\nValue: 45201"}]}, {"id": "mind2web_339", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.601, 0.335, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_340", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.358, 0.373, 0.388] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_341", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.257, 0.113, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_342", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\n[button] Help -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.072, 0.571, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_343", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.455, 0.61, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_344", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 1.11, 0.088, 1.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_345", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_b3e1739e-0621-49d7-a6c6-fd3bcdd807c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_346", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.093, 0.173, 0.154] in the image\nAnd my action is Action: TYPE\nValue: Mumbai"}]}, {"id": "mind2web_347", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_7b892c42-4c6d-4adf-af9b-b77b7adf8681.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK\n[heading] Long Island Rail Road & Metro-North Railroad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.427, 0.5, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_348", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.012, 0.1, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_349", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? 
-> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.127, 0.732, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_350", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[button] Search -> CLICK\n[button] Recommended -> CLICK\n[div] Lowest Price -> CLICK\n[svg] -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.797, 0.308, 0.811] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_351", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.343, 0.159, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_352", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_1ca0bcae-dc05-4a2d-bfd0-0838c8284ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.551, 0.463, 0.578] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_353", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.072, 0.487, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_354", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.442, 0.096, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_355", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK\n[button] Add to cart\u2014$46.73 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.354, 0.494, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_356", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_cbfb435d-7c81-4de0-8bee-f5106b6b09e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.007, 0.393, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_357", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.235, 0.559, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_358", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK\n[textbox] Max Price -> TYPE: 200\n[textbox] Min Price -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.686, 0.976, 0.728] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_359", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.28, 0.312, 0.303] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_360", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.036, 0.164, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_361", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[div] 1 days -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.157, 0.264, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_362", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.16, 0.434, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_363", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.091, 0.734, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_364", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.387, 0.759, 0.415] in the image\nAnd my action is Action: TYPE\nValue: X123456"}]}, {"id": "mind2web_365", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Go! -> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK\n[span] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.282, 0.616, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_366", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: The Flash"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.257, 0.657, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_367", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.128, 0.522, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_368", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_069e32e5-39d1-4db3-88d6-167a919fd1c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.006, 0.995, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Atlantis"}]}, {"id": "mind2web_369", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.245, 0.371, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_370", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.519, 0.974, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_371", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.177, 0.211, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_372", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.732, 0.032, 0.741] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_373", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_abb32f30-a508-44b6-a76f-b96667518a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.211, 0.29, 0.241] in the image\nAnd my action is Action: TYPE\nValue: bhz"}]}, {"id": "mind2web_374", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.271, 0.721, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_375", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.397, 0.422, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_376", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? 
-> TYPE: SHANGHAI\n[div] Shanghai, China -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.441, 0.471, 0.485] in the image\nAnd my action is Action: TYPE\nValue: SEOUL"}]}, {"id": "mind2web_377", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.603, 0.435, 0.802] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_378", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.046, 0.728, 0.084] in the image\nAnd my action is Action: TYPE\nValue: las vegas raiders"}]}, {"id": "mind2web_379", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK\n[link] Choose Another Hotel -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.783, 0.308, 0.795] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_380", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.237, 0.171, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_381", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.255, 0.573, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_382", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 0.255, 0.689, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_383", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK\n[button] Open Navigation Drawer -> CLICK\n[span] Top 250 TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.728, 0.67, 0.738] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_384", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.338, 0.881, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_385", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.214, 0.347, 0.228] in the image\nAnd my action is Action: TYPE\nValue: 75"}]}, {"id": "mind2web_386", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.862, 0.29, 0.882] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_387", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 12"}]}, {"id": "mind2web_388", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK\n[link] MULTI-RIDES & RAIL PASSES USA Rail passes, monthly... -> CLICK\n[img] -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.627, 0.737, 0.641] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_389", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.462, 0.352, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_390", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.212, 0.085, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_391", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK\n[link] UEFA CHAMPIONS LEAGUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.1, 0.604, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_392", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.496, 0.235, 0.514] in the image\nAnd my action is Action: SELECT\nValue: 50 mi"}]}, {"id": "mind2web_393", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.123, 0.362, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_394", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. 
The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.584, 0.451, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_395", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.221, 0.573, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_396", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.317, 0.706, 0.345] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_397", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_6d14c32b-ee67-415e-80d0-045d489e0731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.591, 0.109, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_398", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK\n[textbox] Flight destination input -> TYPE: PARIS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.221, 0.84, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_399", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: kinect camera\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.304, 0.179, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_400", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_74581ca5-a492-41c8-89db-5f671285f014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.424, 0.557, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_401", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK\n[label] Black -> CLICK\n[div] Average Ratings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.886, 0.088, 0.898] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_402", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.279, 0.259, 0.317] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_403", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.195, 0.677, 0.228] in the image\nAnd my action is Action: TYPE\nValue: Cyberpunk 2077"}]}, {"id": "mind2web_404", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.428, 0.355, 0.44] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_405", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.243, 0.915, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_406", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[checkbox] Outdoor Seating -> CLICK\n[checkbox] Dogs Allowed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.52, 0.688, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_407", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.389, 0.363, 0.425] in the image\nAnd my action is Action: TYPE\nValue: sin"}]}, {"id": "mind2web_408", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Start Journey -> SELECT: Train"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.476, 0.61, 0.506] in the image\nAnd my action is Action: SELECT\nValue: Bus"}]}, {"id": "mind2web_409", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_410", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.826, 0.31, 0.838] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_411", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.373, 0.269, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_412", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.016, 0.328, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_413", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_5e1ca919-5001-4d5f-83a3-e8b5f8270ccc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd\n[textbox] Email Address -> TYPE: abc@abc.com\n[checkbox] Solar Roof -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.89, 0.365, 0.914] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_414", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.331, 0.206, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_415", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[tab] Fastest -> CLICK\n[button] See flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.731, 0.786, 0.76] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_416", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.942, 0.201, 0.977, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_417", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.023, 0.361, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_418", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.526, 0.33, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_419", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.118, 0.775, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_420", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.186, 0.359, 0.204] in the image\nAnd my action is Action: TYPE\nValue: Grand Central, NY"}]}, {"id": "mind2web_421", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.271, 0.186, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_422", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.155, 0.294, 0.171] in the image\nAnd my action is Action: TYPE\nValue: all star stand up comedy"}]}, {"id": "mind2web_423", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK\n[link] Financial Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.613, 0.867, 0.664] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_424", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.074, 0.208, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_425", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.195, 0.953, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_426", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.436, 0.846, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_427", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.045, 0.142, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_428", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK\n[button] 43235 -> CLICK\n[input] -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.28, 0.395, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_429", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests\n[button] March 30, 2023. Selected date. -> CLICK\n[button] 1:30 PM Dining Room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.506, 0.523, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_430", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK\n[button] Heartland -> CLICK\n[link] The page with details for The Heart of America wil... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.386, 0.618, 0.485, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_431", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.265, 0.249, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_432", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.188, 0.238, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_433", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.015] in the image\nAnd my action is Action: TYPE\nValue: SOUTH AFRICAN HISTORY PODCAST"}]}, {"id": "mind2web_434", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.244, 0.265, 0.265] in the image\nAnd my action is Action: SELECT\nValue: 17"}]}, {"id": "mind2web_435", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/23/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.992, 0.895, 1.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_436", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK\n[span] Virginia -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.428, 0.137, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_437", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.4, 0.634, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_438", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK\n[checkbox] Honda (549) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 2.01, 0.277, 2.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_439", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.444, 0.984, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_440", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.243, 0.514, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_441", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.057, 0.372, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_442", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.005, 0.681, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_443", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.182, 0.609, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_444", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.162, 0.085, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_445", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK\n[link] Explore Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.815, 0.579, 0.838] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_446", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.008, 0.519, 0.013] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_447", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.094, 0.552, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_448", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Guardians of the Galaxy\n[tab] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.338, 0.677, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_449", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_745736e0-20b8-4366-bfa1-c023ed7c78df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_450", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_fa625908-78ca-4882-a85e-528a818b3a77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[option] New York City\u00a0\u00a0 City -> CLICK\n[span] Mar 9 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.244, 0.607, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_451", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_13275433-ad18-45f4-8742-60e8c549e96f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.158, 0.991, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_452", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.3, 0.495, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_453", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.027, 0.614, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_454", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.355, 0.292, 0.401] in the image\nAnd my action is Action: SELECT\nValue: Pickup"}]}, {"id": "mind2web_455", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.341, 0.619, 0.361] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_456", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.317, 0.833, 0.349] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_457", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.424, 0.373, 0.448, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_458", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.386, 0.393, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_459", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_2edb875d-df64-4128-9e2e-c8d147290aff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK\n[span] Walgreens -> CLICK\n[textbox] Add a title (Required) -> TYPE: Walgreens"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.698, 0.142, 0.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_460", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.573, 0.952, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_461", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[combobox] Search Board Games... 
-> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.273, 0.312, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_462", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK\n[textbox] Enter First name, last name, and/or username: -> TYPE: Joe Bloggs\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.278, 0.736, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_463", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.037] in the image\nAnd my action is Action: TYPE\nValue: wireless keyboard mouse"}]}, {"id": "mind2web_464", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.136, 0.454, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_465", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.112, 0.63, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_466", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.006, 0.348, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_467", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK\n[span] Price by Core -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.531, 0.817, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_468", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.046, 0.67, 0.057] in the image\nAnd my action is Action: TYPE\nValue: Barclays Center"}]}, {"id": "mind2web_469", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.465, 0.336, 0.495] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_470", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.197, 0.366, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_471", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.286, 0.359, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_472", "image": {"bytes": "", "path": "./images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse recent NFL-related videos.\nPrevious actions:\n[link] National Football League NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.038, 0.729, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_473", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.283, 0.77, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_474", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.347, 0.103, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_475", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Guides -> CLICK\n[link] Vehicle Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.3, 0.337, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_476", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500\n[combobox] State -> SELECT: New Jersey"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.336, 0.459, 0.364] in the image\nAnd my action is Action: SELECT\nValue: Challenged (< 580 FICO\u00ae Score)"}]}, {"id": "mind2web_477", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.364, 0.196, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_478", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] May 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.333, 0.831, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_479", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.466, 0.022, 0.556, 0.028] in the image\nAnd my action is Action: SELECT\nValue: 2 Adults"}]}, {"id": "mind2web_480", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. 
-> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.191, 0.249, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_481", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: sheffield\n[span] Sheffield -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_482", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK\n[button] Sydney -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: pasta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.021, 0.657, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_483", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.31, 0.371, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_484", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.523, 0.546, 0.568] in the image\nAnd my action is Action: SELECT\nValue: 8 00 PM"}]}, {"id": "mind2web_485", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.033, 0.576, 0.047] in the image\nAnd my action is Action: TYPE\nValue: KCCR"}]}, {"id": "mind2web_486", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... -> CLICK\n[button] 2:00 PM Eatery -> CLICK\n[button] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.298, 0.523, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_487", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_488", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.574, 0.385, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_489", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.251, 0.416, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_490", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK\n[heading] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.216, 0.564, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_491", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.124, 0.474, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_492", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.245, 0.587, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_493", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.396, 0.5, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_494", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... -> CLICK\n[link] Go to route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.244, 0.379, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_495", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.797, 0.29, 0.815] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_496", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.401, 0.486, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_497", "image": {"bytes": "", "path": "./images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the latest news from rotten tomatoes.\nPrevious actions:\n[link] NEWS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.105, 0.91, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_498", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_e8482e40-59db-4fc3-aa34-8f93399de23b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.208, 0.463, 0.235] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_499", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\n[button] Travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.219, 0.377, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_500", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.014, 0.491, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_501", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.0, 0.482, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_502", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.343, 0.256, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_503", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[link] Restaurants -> HOVER\n[span] Thai -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.029, 0.564, 0.046] in the image\nAnd my action is Action: TYPE\nValue: WESTMINSTER"}]}, {"id": "mind2web_504", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[textbox] Near -> TYPE: WEST HOLLYWOOD\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.134, 0.632, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_505", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. 
Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK\n[textbox] Apt, floor, suite, etc (optional) -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.491, 0.702, 0.539] in the image\nAnd my action is Action: TYPE\nValue: Buck"}]}, {"id": "mind2web_506", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.146, 0.587, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_507", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK\n[span] -> CLICK\n[label] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.367, 0.916, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_508", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.073, 0.292, 0.083] in the image\nAnd my action is Action: SELECT\nValue: Delivery"}]}, {"id": "mind2web_509", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK\n[button] Select fare -> CLICK\n[heading] LKR\u00a03,125,932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.564, 0.48, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_510", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Rap / Hip Hop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.243, 0.655, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_511", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.861, 0.121, 0.9, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_512", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.028, 0.45, 0.045] in the image\nAnd my action is Action: TYPE\nValue: Oakland, CA"}]}, {"id": "mind2web_513", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 1.205, 0.114, 1.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_514", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... -> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.244, 0.546, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_515", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.644, 0.192, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_516", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK\n[combobox] Find a store -> TYPE: 60540\n[option] 60540 Naperville, IL, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.29, 0.352, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_517", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.488, 0.29, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_518", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.062, 0.324, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_519", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.293, 0.5, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_520", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500\n[link] Show 684 stays -> CLICK\n[path] -> CLICK\n[textbox] Name -> TYPE: Togo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.5, 0.703, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_521", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK\n[listbox] hour -> SELECT: 17\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.188, 0.925, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_522", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.396, 0.268, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_523", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK\n[select] 1900 -> SELECT: 1995"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.427, 0.419, 0.508, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_524", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.027, 0.277, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_525", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.261, 0.256, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_526", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.393, 0.418, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_527", "image": {"bytes": "", "path": "./images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give a like to the #1 track of the Real Time Top Chart\nPrevious actions:\n[link] Charts -> CLICK\n[gridcell] Like Crazy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.195, 0.233, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_528", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Adventure -> CLICK\n[div] Narrow By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 0.995, 0.294, 1.001] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_529", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Phoenix -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.333, 0.881, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_530", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.204, 0.473, 0.236] in the image\nAnd my action is Action: SELECT\nValue: RX"}]}, {"id": "mind2web_531", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK\n[button] 2023-04-05 -> CLICK\n[combobox] Desired reservation time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.703, 0.45, 0.909, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_532", "image": {"bytes": "", "path": "./images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Remove the SSD on my cart\nPrevious actions:\n[link] Shopping Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.463, 0.695, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_533", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5e01100b-9202-4e0d-84f9-a986283066f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK\n[searchbox] City, State, or ZIP code -> TYPE: 10005"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.423, 0.62, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_534", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... 
-> TYPE: recipe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.103, 0.745, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_535", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.309, 0.773, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_536", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.314, 0.748, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_537", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan\n[textbox] Search -> ENTER\n[span] Michael Jordan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.238, 0.126, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_538", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.307, 0.808, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_539", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.009, 0.05, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_540", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.429, 0.626, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_541", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.302, 0.378, 0.334] in the image\nAnd my action is Action: SELECT\nValue: Climbing"}]}, {"id": "mind2web_542", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.52, 0.83, 0.538] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_543", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.122, 0.453, 0.143] in the image\nAnd my action is Action: TYPE\nValue: Aquarium of Paris"}]}, {"id": "mind2web_544", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 1.036, 0.192, 1.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_545", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK\n[generic] Brand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.362, 0.34, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_546", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.6, 0.192, 0.612] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_547", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[li] Hydration Packs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 1.101, 0.119, 1.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_548", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.44, 0.618, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_549", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.337, 0.369, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_550", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[button] This weekend -> CLICK\n[link] See more -> CLICK\n[label] -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.61, 0.065, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_551", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.01] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_552", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.007, 0.984, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_553", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b28fb6d0-74c2-492a-9834-7c55b317bc16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK\n[button] Show 18 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.27, 0.191, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_554", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.29, 0.421, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_555", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_4a07dc77-6047-4b53-8808-a53fbd47e6a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50\n[input] -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.258, 0.978, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_556", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.286, 0.225, 0.337, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_557", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Accra\n[strong] Accra -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.325, 0.94, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_558", "image": {"bytes": "", "path": "./images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the release date and supported platforms for the game \"Elden Ring.\"\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.191, 0.677, 0.223] in the image\nAnd my action is Action: TYPE\nValue: Elden Ring"}]}, {"id": "mind2web_559", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.113, 0.914, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_560", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_90d8d6e0-addf-4d9c-afc8-61048c824a2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.238, 0.114, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_561", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.114, 0.825, 0.141] in the image\nAnd my action is Action: SELECT\nValue: Price low to high"}]}, {"id": "mind2web_562", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.139, 0.355, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_563", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.089, 0.128, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_564", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.717, 0.38, 0.734, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_565", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.149, 0.695, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_566", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.062, 0.931, 0.098] in the image\nAnd my action is Action: TYPE\nValue: Kevin Durant"}]}, {"id": "mind2web_567", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.654, 0.691, 0.675] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_568", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... 
-> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.3, 0.598, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_569", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor\n[button] Submit Search -> CLICK\n[button] Jerry Trainor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.727, 0.098, 0.735] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_570", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_4ff772da-2257-42ff-8770-abeb5ee76e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: SE4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.279, 0.264, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_571", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.153, 0.32, 0.179] in the image\nAnd my action is Action: TYPE\nValue: california"}]}, {"id": "mind2web_572", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.089, 0.478, 0.111] in the image\nAnd my action is Action: TYPE\nValue: Las Vegas"}]}, {"id": "mind2web_573", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.082, 0.961, 0.11] in the image\nAnd my action is Action: TYPE\nValue: atlanta georgia"}]}, {"id": "mind2web_574", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.438, 0.588, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_575", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.107, 0.344, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_576", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[heading] Used 2000 Ford Mustang GT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.131, 0.854, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_577", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.081, 0.266, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_578", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 1.052, 0.956, 1.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_579", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.127, 0.967, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_580", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.341, 0.875, 0.375] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_581", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.241, 0.828, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_582", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK\n[option] Premium economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.422, 0.478, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_583", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK\n[textbox] Search for parking -> TYPE: Atlanta International Airport\n[li] Atlanta International Airport, Spine Road, College... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.534, 0.372, 0.571] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_584", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.193, 0.438, 0.211] in the image\nAnd my action is Action: TYPE\nValue: north las vegas"}]}, {"id": "mind2web_585", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.012, 0.559, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_586", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.127, 0.808, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_587", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.455, 0.645, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_588", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.105, 0.673, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_589", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.145, 0.094, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_590", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK\n[link] Athletic Shoes & Sneakers Athletic Shoes & Sneaker... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.548, 0.986, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_591", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.138, 0.487, 0.173] in the image\nAnd my action is Action: TYPE\nValue: Thomas"}]}, {"id": "mind2web_592", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.228, 0.644, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_593", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.181, 0.834, 0.2] in the image\nAnd my action is Action: SELECT\nValue: Price, low to high"}]}, {"id": "mind2web_594", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.057, 0.966, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_595", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.534, 0.514, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_596", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.21, 0.233, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_597", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK\n[label] Limited Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.489, 0.056, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_598", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\n[path] -> CLICK\n[link] Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.364, 0.276, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_599", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.734, 0.086, 0.969, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_600", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.294, 0.905, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_601", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1401227d-f0de-44de-86fc-af5ba0c6c520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.011, 0.578, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_602", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.233, 0.983, 0.257] in the image\nAnd my action is Action: TYPE\nValue: Taylor Swift"}]}, {"id": "mind2web_603", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.431, 0.307, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_604", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.941, 0.008, 0.991, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_605", "image": {"bytes": "", "path": "./images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_77e8e128-c9e5-48a8-a691-331ce9696c14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of publishers for board games\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.005, 0.184, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_606", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.799, 0.699, 0.824] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_607", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.309, 0.559, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_608", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543\n[input] -> TYPE: John\n[input] -> TYPE: Green"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.096, 0.934, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_609", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_610", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK\n[button] Sydney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.016, 0.423, 0.036] in the image\nAnd my action is Action: TYPE\nValue: pasta"}]}, {"id": "mind2web_611", "image": {"bytes": "", "path": "./images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current injuries of Phoenix Suns players.\nPrevious actions:\n[link] NBA . -> HOVER\n[div] Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.387, 0.306, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_612", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.07, 0.266, 1.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_613", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK\n[link] BY PHONE\u00a0NUMBER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.37, 0.328, 0.4] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_614", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK\n[button] 43235 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.244, 0.754, 0.262] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_615", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.245, 0.358, 0.277] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_616", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.39, 0.3, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_617", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.314, 0.188, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_618", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.083, 0.319, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_619", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.174, 0.753, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_620", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.502, 0.281, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_621", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.446, 0.145, 0.471, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_622", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: san francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.68, 0.62, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_623", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.446, 0.218, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_624", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30\n[combobox] AM or PM -> SELECT: PM\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.378, 0.874, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_625", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> CLICK\n[spinbutton] Flight number , required. -> TYPE: 6944"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.374, 0.62, 0.408] in the image\nAnd my action is Action: SELECT\nValue: Thursday, April 6"}]}, {"id": "mind2web_626", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.421, 0.48, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_627", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\n[combobox] Search -> TYPE: gloomhaven\n[link] Gloomhaven (2017) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.386, 0.26, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_628", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.201, 0.249, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_629", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.159, 0.554, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_630", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.28, 0.803, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_631", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_632", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.017, 0.353, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_633", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.181, 0.383, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_634", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4\n[listitem] 4 -> CLICK\n[button] 04/12/2023 -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.329, 0.811, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_635", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.011, 0.645, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_636", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_b2723760-8071-47cb-9f11-2d675175cbe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK\n[strong] Filters -> CLICK\n[checkbox] Motorcycle Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.465, 0.328, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_637", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.227, 0.273, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_638", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.329, 0.498, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_639", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.615, 0.365, 0.728] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_640", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.158, 0.326, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_641", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2c8d72ba-69ce-4d1c-bfdf-192d600a3e99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.316, 0.384, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_642", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.363, 0.868, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_643", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.356, 0.62, 0.376] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_644", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.232, 0.433, 0.272] in the image\nAnd my action is Action: TYPE\nValue: Accounting"}]}, {"id": "mind2web_645", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.365, 0.695, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_646", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[svg] -> CLICK\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.287, 0.804, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_647", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_aec70830-4ba3-44b3-9aff-3d5399090ad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.363, 0.255] in the image\nAnd my action is Action: TYPE\nValue: Abbotsford"}]}, {"id": "mind2web_648", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK\n[button] Find flights -> CLICK\n[button] Roundtrip $681 United Economy (U) Select fare for ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.393, 0.497, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_649", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.323, 0.264, 0.34] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_650", "image": {"bytes": "", "path": "./images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the odds for upcoming NHL matches.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] NHL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.114, 0.604, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_651", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.198, 0.991, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_652", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.077, 0.777, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_653", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.337, 0.253, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_654", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.07, 0.033, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_655", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.368, 0.955, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_656", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.021, 0.564, 0.034] in the image\nAnd my action is Action: TYPE\nValue: SAN FRANSISCO"}]}, {"id": "mind2web_657", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.034, 0.263, 1.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_658", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: Prometheus\n[div] Prometheus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.489, 0.725, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_659", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_7b6615cd-da39-41f9-a701-e6becbf3bdaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_660", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.442, 0.648, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_661", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.343, 0.845, 0.364] in the image\nAnd my action is Action: TYPE\nValue: north station"}]}, {"id": "mind2web_662", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.087, 0.257, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_663", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.121, 0.488, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_664", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.263, 0.247, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_665", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.317, 0.93, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_666", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.321, 0.471, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_667", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Camping Tents -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.466, 0.127, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_668", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.506, 0.237, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_669", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.232, 0.709, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_670", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.094, 0.646, 0.12] in the image\nAnd my action is Action: SELECT\nValue: 1 Child"}]}, {"id": "mind2web_671", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK\n[heading] STATS -> CLICK\n[link] GOALS Jamal Musiala Jamal Musiala 11 G -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.171, 0.678, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_672", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.194, 0.539, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_673", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.233, 0.196, 0.255] in the image\nAnd my action is Action: SELECT\nValue: US$20 to US$40"}]}, {"id": "mind2web_674", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.088, 0.673, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_675", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.345, 0.868, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_676", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.468, 0.281, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_677", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK\n[link] 10 (131) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.417, 0.974, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_678", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.344, 0.636, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_679", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.058, 0.178, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_680", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.258, 0.091, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 2019"}]}, {"id": "mind2web_681", "image": {"bytes": "", "path": "./images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my item inventory.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.125, 0.481, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_682", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.3, 0.148, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_683", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.253, 0.438, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_684", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... -> TYPE: 1\n[combobox] undefined Selected 1 room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.757, 0.339, 0.778] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_685", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[label] Air India -> HOVER\n[button] Air India only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.001, 2.237, 0.284, 2.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_686", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Rating -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.546, 0.536, 0.669, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_687", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.404, 0.377, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_688", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 1.697, 0.091, 1.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_689", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.153, 0.99, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_690", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK\n[link] Harry Potter and the Cursed Child - Parts I & II -> CLICK\n[link] Add to wishlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.096, 0.716, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_691", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.404, 0.056, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_692", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.265, 0.477, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_693", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.017, 0.509, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_694", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_695", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.325, 0.353, 0.362] in the image\nAnd my action is Action: TYPE\nValue: 1234567890"}]}, {"id": "mind2web_696", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.284, 0.351, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_697", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.881, 0.112, 0.891] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_698", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.014, 0.546, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_699", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK\n[link] Uncharted: Legacy of Thieves Collection - Wiki Bun... -> CLICK\n[link] Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 1.028, 0.348, 1.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_700", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.244, 0.447, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_701", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. 
The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.121, 0.387, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_702", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.036, 0.161, 1.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_703", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[div] & up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 1.82, 0.192, 1.835] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_704", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_4a9ffe3e-66a6-4eab-a124-a5d40e3594c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.713, 0.681, 0.752] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_705", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.213, 0.072, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_706", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK\n[span] Walgreens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.109, 0.481, 0.16] in the image\nAnd my action is Action: TYPE\nValue: Walgreens"}]}, {"id": "mind2web_707", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.633, 0.811, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_708", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.296, 0.045, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_709", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_33e764b3-9adf-43f0-9086-44dfb1bd8160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium\n[span] SoFi Stadium -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.315, 0.379, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_710", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.014, 0.461, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_711", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.059, 0.88, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_712", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK\n[button] Heartland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.585, 0.157, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_713", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.53, 0.31, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_714", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: ZURICH"}]}, {"id": "mind2web_715", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $21.99/Day -> CLICK\n[checkbox] $13.00/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.312, 0.93, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_716", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.121, 0.418, 0.159] in the image\nAnd my action is Action: TYPE\nValue: ohio"}]}, {"id": "mind2web_717", "image": {"bytes": "", "path": "./images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for events in Boston.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.077, 0.782, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_718", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.07, 0.326, 0.111] in the image\nAnd my action is Action: TYPE\nValue: pet festival"}]}, {"id": "mind2web_719", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.052, 0.365, 0.258, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_720", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: creed III"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.037, 0.657, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_721", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.627, 0.064, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_722", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_3ab27e3b-370d-41bd-af2e-4cabd704a0c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.095, 0.57, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_723", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Casino -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.81, 0.089, 0.815] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_724", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.46, 0.132, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_725", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.417, 0.279, 0.452] in the image\nAnd my action is Action: TYPE\nValue: doha"}]}, {"id": "mind2web_726", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.179, 0.525, 0.214] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_727", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.244, 0.107, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_728", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK\n[link] The Weeknd -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.476, 0.171, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_729", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.149, 0.713, 0.166] in the image\nAnd my action is Action: TYPE\nValue: Athens"}]}, {"id": "mind2web_730", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK\n[link] High rated -> CLICK\n[link] Far Cry 3 - Ukulele Girl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.07, 0.98, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_731", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK\n[span] 5 -> CLICK\n[span] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.353, 0.912, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_732", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_733", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[div] Today -> CLICK\n[button] March 10, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.987, 0.35, 1.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_734", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.319, 0.05, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_735", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.042, 0.652, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_736", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.175, 0.115, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_737", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.133, 0.697, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_738", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_3cc263af-93ef-413b-a357-8826b6929b8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[select] Sort by Distance -> SELECT: Sort by Price\n[heading] 335 Columbia St. - MOTORCYCLE PARKING ONLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.674, 0.819, 0.711] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_739", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.402, 0.365, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_740", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.172, 0.495, 0.202] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_741", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.088, 0.128, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_742", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.393, 0.387, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_743", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.067, 0.77, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_744", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.288, 0.286, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_745", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[checkbox] Based On Comic Book (226) -> CLICK\n[strong] IMDb Rating -> CLICK\n[group] IMDb user rating (average) -> SELECT: 7.0\n[group] IMDb user rating (average) -> SELECT: 9.0\n[strong] Refine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.16, 0.54, 0.165] in the image\nAnd my action is Action: SELECT\nValue: Number of Votes"}]}, {"id": "mind2web_746", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK\n[checkbox] 29 March 2023 -> CLICK\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.115, 0.927, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_747", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.511, 0.643, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_748", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.253, 0.407, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_749", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.558, 0.263, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_750", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.464, 0.58, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_751", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_b376aa10-6957-4130-b75f-17abc80fd6f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.771, 0.2, 0.799, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_752", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.104, 0.27, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_753", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.0, 0.482, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_754", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.452, 0.841, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_755", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.461, 0.834, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_756", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.297, 0.171, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_757", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.283, 0.252, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_758", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.837, 0.884, 0.858] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_759", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.098, 0.46, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_760", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER\n[link] Find a Store -> CLICK\n[combobox] Find a store -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.117, 0.668, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_761", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK\n[heading] Long Island Rail Road & Metro-North Railroad -> CLICK\n[div] Long Island Rail Road schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.688, 0.275, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_762", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Gaming Monitors Accessories -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.053, 0.49, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_763", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.294, 0.177, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_764", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.03, 0.535, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_765", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK\n[checkbox] list-filter-item-label-3 -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.191, 0.633, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_766", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.155, 0.561, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_767", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.343, 0.855, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_768", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK\n[textbox] KOA Rewards Number -> TYPE: 1000000001\n[textbox] Postal Code -> TYPE: 10023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.648, 0.934, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_769", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[path] -> CLICK\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.481, 0.281, 0.507] in the image\nAnd my action is Action: SELECT\nValue: Veterans Railcard"}]}, {"id": "mind2web_770", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345\n[button] Add flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.52, 0.32, 0.537] in the image\nAnd my action is Action: TYPE\nValue: ian.lo@gmail.com"}]}, {"id": "mind2web_771", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.388, 0.319, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_772", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 1.272, 0.194, 1.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_773", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.91, 0.346, 0.952, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_774", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_0b5688cb-71a1-4fcf-a156-bbac0e95e816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK\n[link] Find Us -> CLICK\n[textbox] search input -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.206, 0.216, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_775", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[div] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.358, 0.408, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_776", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.803, 0.408, 0.808] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_777", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.3, 0.388, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_778", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.18, 0.75, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_779", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado\n[button] Activity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.444, 0.382, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_780", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.01, 0.491, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_781", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_cac81dd7-bfc2-4d9a-ab71-29ee91f89e40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.331, 0.87, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_782", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.724, 0.158, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_783", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.211, 0.423, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_784", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.35, 0.747, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_785", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Camping Tents -> CLICK\n[link] add filter: 6-person(24) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.264, 0.428, 0.284] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_786", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.292, 0.739, 0.308] in the image\nAnd my action is Action: TYPE\nValue: 99"}]}, {"id": "mind2web_787", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.26, 0.783, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_788", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[section] Flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.389, 0.478, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_789", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.014, 0.369, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_790", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.852, 0.052, 0.862] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_791", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.034, 0.28, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_792", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.723, 0.068, 0.731] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_793", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.31, 0.248, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_794", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes\n[link] gingerbread cakes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.165, 0.175, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_795", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.354, 0.795, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_796", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.182, 0.874, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_797", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.474, 0.263, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_798", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.265, 0.652, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_799", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.369, 0.748, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_800", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] Motel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.172, 0.633, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_801", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.746, 0.285, 0.778] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_802", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.238, 0.509, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_803", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.264, 0.573, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_804", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 1.023, 0.087, 1.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_805", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.272, 0.691, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_806", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.215, 0.171, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_807", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.421, 0.48, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_808", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.297, 0.612, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_809", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_41574158-9bb5-445b-8eb8-e3bd3ffc02bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.095, 0.448, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_810", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] List Explorer -> CLICK\n[link] Manage My Lists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.239, 0.046, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_811", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK\n[button] Contact -> CLICK\n[button] Contact the organizer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.288, 0.692, 0.315] in the image\nAnd my action is Action: SELECT\nValue: Question about the event"}]}, {"id": "mind2web_812", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.316, 0.335, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_813", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.297, 0.133, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_814", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_7be19ef7-3aff-4a44-8e6e-27ddc4be533a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.2, 0.889, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_815", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.248, 0.95, 0.282] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_816", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.238, 0.375, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_817", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.365, 0.384, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_818", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.265, 0.249, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_819", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.015, 0.546, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_820", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK\n[div] #virtual -> CLICK\n[label] -> CLICK\n[div] How to Make Six-Figures as a Consultant or Coach -... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.464, 0.575, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_821", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.068, 0.441, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_822", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.172, 0.476, 0.19] in the image\nAnd my action is Action: TYPE\nValue: Monty"}]}, {"id": "mind2web_823", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK\n[combobox] Drop off time Selected 10:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 1.128, 0.48, 1.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_824", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.141, 0.481, 0.161] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_825", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.271, 0.174, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_826", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK\n[textbox] Enter zip code or location. Please enter a valid l... -> TYPE: 90028\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.306, 0.891, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_827", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_5b91bcfc-b54e-4802-bd4f-397bba7bf1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK\n[text] J10 -> CLICK\n[text] J9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.606, 0.845, 0.657, 0.915] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_828", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.041, 0.668, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_829", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_830", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.609, 0.446, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_831", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... 
-> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.151, 0.377, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_832", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_6f320723-118d-4b4d-b300-c9f924cf5926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... -> CLICK\n[link] MONTHLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_833", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: Panini Diamonds Kings Baseball cards"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.031, 0.228, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_834", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.005, 0.791, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_835", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.011, 0.82, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_836", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Florida -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 1.085, 0.679, 1.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_837", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.235, 0.331, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_838", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.21, 0.5, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_839", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.33, 0.214, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_840", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.164, 0.324, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_841", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.213, 0.655, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_842", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.084, 0.637, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_843", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.003, 0.925, 0.02] in the image\nAnd my action is Action: TYPE\nValue: travel credit"}]}, {"id": "mind2web_844", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.008, 0.781, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_845", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 1.468, 0.375, 1.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_846", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.454, 0.196, 0.475] in the image\nAnd my action is Action: SELECT\nValue: Audio (376)"}]}, {"id": "mind2web_847", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.322, 0.362, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_848", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.356, 0.365, 0.376] in the image\nAnd my action is Action: TYPE\nValue: san francisco"}]}, {"id": "mind2web_849", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.159, 0.33, 0.188] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_850", "image": {"bytes": "", "path": "./images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the score of the 2020 Super Bowl.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.011, 0.151, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_851", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.794, 0.053, 0.832, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_852", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.198, 0.931, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_853", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.217, 0.768, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_854", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.185, 0.644, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_855", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.456, 0.711, 0.527] in the image\nAnd my action is Action: TYPE\nValue: Wedding Anniversary"}]}, {"id": "mind2web_856", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.479, 0.345, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_857", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.346, 0.253, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_858", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.008, 0.553, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_859", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.053, 0.352, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_860", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: playstation gift card $10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.031, 0.228, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_861", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK\n[generic] 3 -> CLICK\n[button] Let's go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.456, 0.201, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_862", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.282, 0.783, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_863", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.205, 0.447, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_864", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.087, 0.938, 1.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_865", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.069, 0.327, 0.091] in the image\nAnd my action is Action: TYPE\nValue: Leeds"}]}, {"id": "mind2web_866", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[button] Done -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.899, 0.263, 0.905] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_867", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.387, 0.137, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_868", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_869", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.009, 0.39, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_870", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.218, 0.24, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_871", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.188, 0.393, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_872", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.517, 0.451, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_873", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.095, 0.613, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_874", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\n[textbox] Search -> TYPE: George Clooney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.44, 0.704, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_875", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.011, 0.74, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_876", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK\n[polygon] -> CLICK\n[heading] Mini Short-Sleeve T-Shirt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.451, 0.906, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_877", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.225, 0.413, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_878", "image": {"bytes": "", "path": "./images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current injuries of Phoenix Suns players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.101, 0.335, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_879", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.558, 0.72, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_880", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.684, 0.326, 0.701] in the image\nAnd my action is Action: TYPE\nValue: las vegas"}]}, {"id": "mind2web_881", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.518, 0.104, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_882", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Done -> CLICK\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.52, 0.309, 0.531] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_883", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.163, 0.432, 0.202] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_884", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 1.908, 0.192, 1.927] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_885", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing\n[textbox] Location -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.291, 0.717, 0.322] in the image\nAnd my action is Action: SELECT\nValue: Within 50 miles"}]}, {"id": "mind2web_886", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.151, 0.664, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_887", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.149, 0.181, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_888", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.645, 0.394, 0.665] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_889", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: laguardia airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.209, 0.427, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_890", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.341, 0.469, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_891", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.13, 0.209, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_892", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.011, 0.391, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_893", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_5f7d8dc9-cccc-4362-a752-71bf2543a680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.255, 0.285, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_894", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.294, 0.986, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_895", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.441, 0.481, 0.485] in the image\nAnd my action is Action: TYPE\nValue: SHANGHAI"}]}, {"id": "mind2web_896", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.126, 0.466, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_897", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.603, 0.43, 0.628] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_898", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.042, 0.309, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_899", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.248, 0.414, 0.276] in the image\nAnd my action is Action: TYPE\nValue: 37863"}]}, {"id": "mind2web_900", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.462, 0.245, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_901", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers\n[link] car accident lawyers -> CLICK\n[div] Sort: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.154, 0.683, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_902", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: XA1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.244, 0.583, 0.286] in the image\nAnd my action is Action: SELECT\nValue: AZ"}]}, {"id": "mind2web_903", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK\n[span] Type your medication name -> TYPE: Metformin 1000mg"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 0.367, 0.822, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_904", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.109, 0.753, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_905", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_5f6c4a74-bc21-4db6-a489-df0c42e7fc32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.096, 0.896, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_906", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.245, 0.359, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_907", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.228, 0.34, 0.246] in the image\nAnd my action is Action: TYPE\nValue: 10003"}]}, {"id": "mind2web_908", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.041, 0.697, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_909", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\n[link] Company -> CLICK\n[link] Career Opportunities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 1.812, 0.758, 1.847] in the image\nAnd my action is Action: SELECT\nValue: Product Management"}]}, {"id": "mind2web_910", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[button] Load more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 1.448, 0.715, 1.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_911", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.24, 0.491, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_912", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.892, 0.047, 0.963, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_913", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.059, 0.931, 0.094] in the image\nAnd my action is Action: TYPE\nValue: Golden State Warriors"}]}, {"id": "mind2web_914", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK\n[span] Dates -> CLICK\n[button] October 2024 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.306, 0.871, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_915", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.401, 0.552, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_916", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_1bcc0fa7-4d6f-4144-90af-4829befb88b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.323, 0.647, 0.359] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_917", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.215, 0.312, 0.234] in the image\nAnd my action is Action: TYPE\nValue: Albany, NY"}]}, {"id": "mind2web_918", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.09, 0.246, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_919", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK\n[div] Anywhere -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.483, 0.824, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_920", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.121, 0.199, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_921", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.183, 0.92, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_922", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_17af7d32-c2a2-4881-bd39-083b934f6dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.054, 0.454, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_923", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.568, 0.107, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_924", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.064, 0.518, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_925", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.241, 0.931, 0.275] in the image\nAnd my action is Action: SELECT\nValue: 6 00 pm"}]}, {"id": "mind2web_926", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.554, 0.074, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_927", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.348, 0.393, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_928", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.461, 0.096, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_929", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.136, 0.612, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_930", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.499, 0.069, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_931", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK\n[button] Age -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.528, 0.8, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_932", "image": {"bytes": "", "path": "./images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current roster of the Miami Heat.\nPrevious actions:\n[link] NBA . -> HOVER\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.378, 0.197, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_933", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.252, 0.194, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_934", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.309, 0.777, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_935", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.14, 0.186, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_936", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.257, 0.739, 0.271] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_937", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.264, 0.161, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_938", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... -> CLICK\n[textbox] Search by Location -> CLICK\n[textbox] Search by Location -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.31, 0.814, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_939", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_4601bf91-26ec-4072-bef5-7a24ec700def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.42, 0.432, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_940", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 1.811, 0.465, 1.843] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_941", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles\n[span] City -> CLICK\n[div] 21 -> CLICK\n[div] 23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.218, 0.194, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_942", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.073, 0.3, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_943", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... 
-> CLICK\n[span] 31 -> CLICK\n[span] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.414, 0.591, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_944", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, May 15, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.162, 0.957, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_945", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.213, 0.377, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_946", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.425, 0.196, 0.446] in the image\nAnd my action is Action: SELECT\nValue: In Stock (41,088)"}]}, {"id": "mind2web_947", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.378, 0.234, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_948", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.261, 0.454, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_949", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com\n[textbox] Phone number -> TYPE: 4533234565\n[textbox] Social Security Number -> TYPE: 234567895"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.834, 0.698, 0.851] in the image\nAnd my action is Action: TYPE\nValue: 06/23/1992"}]}, {"id": "mind2web_950", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK\n[link] Bundle info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.357, 0.6, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_951", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.01, 0.781, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_952", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.206, 0.12, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_953", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.405, 0.146, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_954", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK\n[button] Search -> CLICK\n[button] Sort by:Our Top Picks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.21, 0.448, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_955", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK\n[checkbox] Hotel -> CLICK\n[slider] price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.127, 0.825, 0.155] in the image\nAnd my action is Action: SELECT\nValue: Distance from landmark"}]}, {"id": "mind2web_956", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.162, 0.763, 0.173] in the image\nAnd my action is Action: TYPE\nValue: Phuket"}]}, {"id": "mind2web_957", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK\n[button] Notify for Dinner -> CLICK\n[combobox] Preferred end time -> SELECT: 7:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.569, 0.75, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_958", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.153, 0.281, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_959", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.7, 0.43, 0.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_960", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_961", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.261, 0.249, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_962", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.062, 0.905, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_963", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_08c1d250-f956-4e8d-90dc-d9ae433f1a12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK\n[textbox] Apt, floor, suite, etc (optional) -> TYPE: 2\n[textbox] Business name (optional) -> TYPE: Buck"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.65, 0.702, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_964", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.114, 0.291, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_965", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[div] Update -> CLICK\n[div] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.062, 0.765, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_966", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.059, 0.129, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_967", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[span] AWD/4WD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.911, 0.277, 0.938] in the image\nAnd my action is Action: TYPE\nValue: 50000"}]}, {"id": "mind2web_968", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.012, 0.804, 0.03] in the image\nAnd my action is Action: TYPE\nValue: rogaine"}]}, {"id": "mind2web_969", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.306, 0.01, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_970", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.358, 0.939, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_971", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK\n[link] East Village (9) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.426, 0.207, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_972", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.027, 0.646, 0.036] in the image\nAnd my action is Action: TYPE\nValue: BATMAN"}]}, {"id": "mind2web_973", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] California -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.459, 0.672, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_974", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.646, 0.336, 0.651, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_975", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK\n[button] Go! 
-> CLICK\n[link] Events \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.622, 0.166, 0.735, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_976", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 1.035, 0.822, 1.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_977", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.06, 0.153, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_978", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.506, 0.529, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_979", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.501, 0.984, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_980", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.183, 0.695, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_981", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.153, 0.202, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_982", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.225, 0.777, 0.26] in the image\nAnd my action is Action: TYPE\nValue: james.john@gmail.com"}]}, {"id": "mind2web_983", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.394, 0.166, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_984", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.068, 0.81, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_985", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_c2b8e0f2-ac12-492f-921d-3e6beba7ee58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.133, 0.705, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_986", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.306, 0.716, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_987", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.115, 0.273, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_988", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_3974d678-91c4-437e-8d34-e418f635f93b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM\n[button] Select My Car -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.372, 0.567, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_989", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: los angeles kings"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.171, 0.396, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_990", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4\n[listitem] 4 -> CLICK\n[button] 04/12/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.857, 0.29, 0.879, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_991", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.247, 0.238, 0.28] in the image\nAnd my action is Action: TYPE\nValue: Helena"}]}, {"id": "mind2web_992", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.363, 0.415, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_993", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK\n[textbox] City -> TYPE: Grand Junction"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.414, 0.789, 0.439] in the image\nAnd my action is Action: SELECT\nValue: Colorado"}]}, {"id": "mind2web_994", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.844, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_995", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.106, 0.491, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_996", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK\n[link] Get tickets -> CLICK\n[p] Number of Travellers -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.338, 0.825, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_997", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.241, 0.713, 0.275] in the image\nAnd my action is Action: SELECT\nValue: 9 00 am"}]}, {"id": "mind2web_998", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.577, 0.099, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_999", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1000", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.29, 0.459, 0.31] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_1001", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK\n[textbox] price to -> TYPE: 1000\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.265, 0.701, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1002", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.594, 0.684, 0.601] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1003", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.334, 0.196, 0.355] in the image\nAnd my action is Action: SELECT\nValue: Ages 9-11 (13,217)"}]}, {"id": "mind2web_1004", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.174, 1.035, 0.268, 1.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1005", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.124, 0.257, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1006", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\n[use] -> CLICK\n[link] Super Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 1.256, 0.561, 1.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1007", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.257, 0.285, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1008", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] African -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.302, 0.214, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1009", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.176, 0.367, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1010", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK\n[checkbox] 5 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.191, 0.825, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1011", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.098, 0.329, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1012", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_c32cfad9-2c31-4337-9ac8-0ec37245a3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.09, 0.16, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1013", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.105, 0.191, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1014", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_114787f7-a2e7-4e60-876d-faed27ba9a6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.16, 0.293, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1015", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.353, 0.233, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1016", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.818, 0.832, 0.827] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1017", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.346, 0.032, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1018", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.339, 0.91, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1019", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4741ca4a-48b1-4a2c-ba06-4b43ec6a2164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.225, 0.691, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1020", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.207, 0.287, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1021", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_99b80b37-6099-456f-87a9-32b3ec8481d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.441, 0.261, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1022", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.029, 0.787, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1023", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.135, 0.234, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1024", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... -> CLICK\n[button] 8:15 PM Table -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.305, 0.523, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1025", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... 
-> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.889, 0.426, 0.927, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1026", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[combobox] Sort By -> SELECT: Price : low to high"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.181, 0.273, 0.206] in the image\nAnd my action is Action: TYPE\nValue: 94043"}]}, {"id": "mind2web_1027", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.848, 0.089, 0.853] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1028", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[link] New York, NY -> CLICK\n[button] Find -> CLICK\n[link] Beauty & Youth Village Spa -> CLICK\n[use] -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.402, 0.323, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1029", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.451, 0.495, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1030", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... 
-> TYPE: 1\n[combobox] undefined Selected 1 room -> CLICK\n[option] 1 room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.83, 0.197, 0.85] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1031", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.252, 0.194, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_1032", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.241, 0.413, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1033", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.846, 0.777, 0.876] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1034", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.072, 0.388, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1035", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. 
-> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.741, 0.265, 0.756] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1036", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4e6f0232-53c5-4c0c-b655-3769916435a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.211, 0.492, 0.241] in the image\nAnd my action is Action: TYPE\nValue: ewn"}]}, {"id": "mind2web_1037", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.234, 0.774, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1038", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.201, 0.964, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1039", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.0, 0.716, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1040", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.013, 0.369, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1041", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\n[textbox] Search -> TYPE: George Clooney\n[p] George Clooney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.496, 0.589, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1042", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.183, 0.765, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1043", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York\n[a] NYC - New York, NY -> CLICK\n[combobox] Date -> SELECT: Friday, April 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.435, 0.875, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1044", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK\n[button] Add to basket -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.677, 0.244, 0.716, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1045", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_90fb8feb-ae40-4b6a-bf30-fd8be24554ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Mar 9 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.096, 0.807, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1046", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.082, 0.029, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1047", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.558, 0.845, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1048", "image": {"bytes": "", "path": "./images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what deals there are for Dish Outdoor.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.014, 0.855, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1049", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK\n[button] Brand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.082, 0.79, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1050", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] WOMEN -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.387, 0.096, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1051", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.081, 0.3, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1052", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.439, 0.625, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1053", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[span] Dry Cleaning -> CLICK\n[textbox] Near -> TYPE: new york city\n[span] New York, NY -> CLICK\n[button] Virtual Consultations -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.183, 0.612, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1054", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK\n[searchbox] Search -> TYPE: Prometheus\n[div] Prometheus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 1.275, 0.671, 1.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1055", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1\n[combobox] Passenger 1 -> SELECT: Adult (16-64)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.675, 0.29, 0.711] in the image\nAnd my action is Action: SELECT\nValue: Lowest fare"}]}, {"id": "mind2web_1056", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK\n[link] Soccer . -> CLICK\n[link] Premier League -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.19, 0.377, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1057", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.204, 0.257, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1058", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1059", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: XA1234\n[combobox] state -> SELECT: AZ"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.26, 0.746, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1060", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.371, 0.169, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1061", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.014, 0.328, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1062", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_f3d717cb-a19f-4d53-a68c-8fb2658b1294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK\n[span] From $86 -> CLICK\n[button] Low prices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.176, 0.402, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1063", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[span] Honolulu -> CLICK\n[checkbox] Vehicle -> CLICK\n[checkbox] Large appliances -> CLICK\n[radio] Out-of-state -> CLICK\n[button] Virtual Consultations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.075, 0.63, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1064", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[p] Number of Travellers -> CLICK\n[img] -> CLICK\n[button] Check availability -> CLICK\n[img] -> CLICK\n[li] English - Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.524, 0.794, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1065", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix\n[button] Search -> CLICK\n[link] Albums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.219, 0.415, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1066", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK\n[link] Food & Drink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.238, 0.113, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1067", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8\n[select] 00 -> SELECT: 37\n[select] AM -> SELECT: AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.237, 0.855, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1068", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.501, 0.873, 0.518] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_1069", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_fe4765b7-9e8e-44f9-946c-dca01ee3049a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.34, 0.886, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1070", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! -> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.357, 0.71, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1071", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.033, 0.572, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1072", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.179, 0.446, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_1073", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.155, 0.748, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1074", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\n[img] james9091 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.128, 0.356, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1075", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.554, 0.241, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1076", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.505, 0.389, 0.52] in the image\nAnd my action is Action: SELECT\nValue: 12"}]}, {"id": "mind2web_1077", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.446, 0.573, 0.554, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1078", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.505, 0.341, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1079", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK\n[button] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.957, 0.632, 1.013] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1080", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.237, 0.249, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1081", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.813, 0.607, 0.847] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1082", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK\n[link] Romance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.185, 0.834, 0.204] in the image\nAnd my action is Action: SELECT\nValue: Price, low to high"}]}, {"id": "mind2web_1083", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.919, 0.192, 0.933] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1084", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1085", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_f88b0a6c-386b-4147-ba11-0b26bdf5f77d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.237, 0.329, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1086", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.245, 0.325, 0.287] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_1087", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.07, 0.123, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1088", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.371, 0.522, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1089", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.144, 0.094, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1090", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.101, 0.335, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1091", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_b815ae06-e12d-4a76-bb21-6ea9e107b158.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK\n[listbox] Direction -> SELECT: Forward facing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.346, 0.444, 0.377] in the image\nAnd my action is Action: SELECT\nValue: Window"}]}, {"id": "mind2web_1092", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: Texas City, Texas"}]}, {"id": "mind2web_1093", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.18, 0.375, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1094", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.256, 0.044, 0.631, 0.08] in the image\nAnd my action is Action: TYPE\nValue: gobites uno spork"}]}, {"id": "mind2web_1095", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.046, 0.653, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1096", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.277, 0.196, 0.298] in the image\nAnd my action is Action: SELECT\nValue: In stock (53476)"}]}, {"id": "mind2web_1097", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_3804f977-9b03-466c-a198-f4e922f1bb25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.255, 0.691, 0.301] in the image\nAnd my action is Action: TYPE\nValue: Davis"}]}, {"id": "mind2web_1098", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.439, 0.913, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1099", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1100", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.085, 0.7, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1101", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[img] Women's Sonoma Goods For Life\u00ae Everyday V-Neck Tee -> CLICK\n[textbox] Product quantity -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.459, 0.931, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1102", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.168, 0.813, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1103", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.528, 0.514, 0.707, 0.524] in the image\nAnd my action is Action: TYPE\nValue: 1300"}]}, {"id": "mind2web_1104", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\n[combobox] Select Make -> SELECT: Tesla\n[textbox] Zip -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.243, 0.748, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1105", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.601, 0.363, 0.616] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1106", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests\n[link] Este -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.381, 0.581, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1107", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.077, 0.265, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1108", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.461, 0.096, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1109", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK\n[link] Documentary -> CLICK\n[link] User Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.333, 0.301, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1110", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.578, 0.48, 0.758] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1111", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... 
-> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.235, 0.611, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1112", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.428, 0.09, 0.533, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1113", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.116, 0.345, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1114", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.682, 0.226, 0.69] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1115", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.007, 0.553, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1116", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK\n[link] Visit our cruise deals page to view your offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.416, 0.556, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1117", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.959, 0.918, 0.97] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1118", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! -> CLICK\n[img] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.357, 0.266, 0.395] in the image\nAnd my action is Action: SELECT\nValue: Honda"}]}, {"id": "mind2web_1119", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.487, 0.633, 0.518, 0.647] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1120", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK\n[button] View upcoming departures -> CLICK\n[combobox] Choose a direction -> SELECT: SOUTHBOUND Forest Hills"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.285, 0.695, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1121", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.47, 0.636, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1122", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK\n[div] Tomorrow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.281, 0.607, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1123", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.244, 0.169, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1124", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.334, 0.194, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1125", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.239, 0.359, 0.266] in the image\nAnd my action is Action: TYPE\nValue: Queensboro plaza"}]}, {"id": "mind2web_1126", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.19, 0.163, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1127", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_04801367-454c-48fb-a8ac-db91f6d88bb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.431, 0.707, 0.466] in the image\nAnd my action is Action: TYPE\nValue: 1000000001"}]}, {"id": "mind2web_1128", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.208, 0.754, 0.223] in the image\nAnd my action is Action: TYPE\nValue: sofi stadium"}]}, {"id": "mind2web_1129", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.027, 0.456, 0.062] in the image\nAnd my action is Action: TYPE\nValue: Tsiakkos & Charcoal"}]}, {"id": "mind2web_1130", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.851, 0.45, 0.972, 0.469] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_1131", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: udupi\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.466, 0.742, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1132", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? 
-> TYPE: New York City\n[div] New York City, NY -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.053, 0.517, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1133", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.167, 0.421, 0.207] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_1134", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.5, 0.216, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1135", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.346, 0.829, 0.356] in the image\nAnd my action is Action: TYPE\nValue: mumbai"}]}, {"id": "mind2web_1136", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.107, 0.942, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1137", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.426, 0.467, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1138", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.086, 0.327, 0.114] in the image\nAnd my action is Action: TYPE\nValue: BERLIN"}]}, {"id": "mind2web_1139", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.468, 0.906, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1140", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.243, 0.325, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1141", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.9, 0.916, 0.919] in the image\nAnd my action is Action: TYPE\nValue: Denise"}]}, {"id": "mind2web_1142", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.333, 0.063, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1143", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 2.58, 0.227, 2.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1144", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.142, 0.857, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1145", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.069, 0.713, 0.086] in the image\nAnd my action is Action: TYPE\nValue: florida"}]}, {"id": "mind2web_1146", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.211, 0.287, 0.235] in the image\nAnd my action is Action: TYPE\nValue: 04/01/2023"}]}, {"id": "mind2web_1147", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.346, 0.253, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1148", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes\n[link] gingerbread cakes -> CLICK\n[button] Recipes -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.469, 0.969, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1149", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.332, 0.514, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1150", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.338, 0.222, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1151", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.348, 0.595, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1152", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.289, 0.636, 0.312] in the image\nAnd my action is Action: TYPE\nValue: lin.lon@gmail.com"}]}, {"id": "mind2web_1153", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.085, 0.936, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1154", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 0.101, 0.466, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1155", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK\n[span] EXPLORE DINING -> CLICK\n[radio] Filter group Ship: Carnival Breeze -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.661, 0.055, 0.676] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1156", "image": {"bytes": "", "path": "./images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flight #039028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.046, 0.572, 0.066] in the image\nAnd my action is Action: TYPE\nValue: 039028"}]}, {"id": "mind2web_1157", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.402, 0.242, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1158", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\n[combobox] Search query -> TYPE: lebron james"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.109, 0.259, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1159", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 1.996, 0.62, 2.015] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1160", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.021] in the image\nAnd my action is Action: TYPE\nValue: COMFORTER"}]}, {"id": "mind2web_1161", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.597, 0.412, 0.626] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1162", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.261, 0.16, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1163", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[textbox] Where to? -> TYPE: SEOUL\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.829, 0.405, 0.858, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1164", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.227, 0.352, 0.248] in the image\nAnd my action is Action: SELECT\nValue: 04 00 PM"}]}, {"id": "mind2web_1165", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.347, 0.331, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1166", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.0, 0.816, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1167", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_f68ffe3e-f73c-4de7-8726-93644fdb6ba2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall\n[span] South Allport Street, Chicago, IL, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1168", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.641, 0.815, 0.875, 0.863] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1169", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.33, 0.416, 0.361, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1170", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.619, 0.455, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1171", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_b327b28a-2cc9-4315-9cd0-1545d21d74f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.042, 0.378, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1172", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.102, 0.314, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1173", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_490f9126-f087-4622-9c3c-05e9efc3aaf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.237, 0.651, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1174", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.39, 0.705, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1175", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: New York\n[option] New York, NY -> CLICK\n[img] Concerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.029, 0.264, 1.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1176", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.492, 0.473, 0.511] in the image\nAnd my action is Action: TYPE\nValue: 30000"}]}, {"id": "mind2web_1177", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\n[link] Schedule -> CLICK\n[link] Team Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 1.044, 0.672, 1.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1178", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.425, 0.193, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1179", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK\n[label] Private Tour -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.233, 0.954, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1180", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.336, 0.5, 0.373] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_1181", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_5359e7ef-c441-4c68-a3f9-d54ea991b51f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.119, 0.719, 0.163] in the image\nAnd my action is Action: TYPE\nValue: CHICAGO"}]}, {"id": "mind2web_1182", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.162, 0.923, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1183", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] English -> CLICK\n[link] Last 90 days -> CLICK\n[span] Featured -> CLICK\n[option] Publication Date -> CLICK\n[img] Records of the Medieval Sword -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.476, 0.985, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1184", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.271, 0.359, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1185", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.144, 0.159, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1186", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.199, 0.459, 0.222] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_1187", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.084, 0.969, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1188", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.245, 0.705, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1189", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: Greenville"}]}, {"id": "mind2web_1190", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.074, 0.664, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1191", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.061, 0.563, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1192", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.147, 0.453, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1193", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... -> CLICK\n[combobox] 2 tickets for HAPPY HOUR BURGER -> SELECT: 1 Ticket\n[button] 5:00 PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.209, 0.523, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1194", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.297, 0.677, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1195", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.549, 0.104, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1196", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK\n[textbox] Last name -> TYPE: Atas\n[label] Confirmation number -> TYPE: 1000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.554, 0.201, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1197", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: Dallas Love Field"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.107, 0.914, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1198", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_97db88b1-dff6-4fa4-a01d-b6e189ada5ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK\n[span] From $86 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.176, 0.308, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1199", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.203, 0.495, 0.256] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_1200", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.252, 0.546, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1201", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.257, 0.439, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1202", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.481, 0.213, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1203", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.733, 0.089, 0.741] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1204", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... 
-> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.43, 0.24, 0.457] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_1205", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.18, 0.923, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1206", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a046b31a-ea81-4dc4-9aac-e65fe81da727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK\n[input] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.375, 0.352, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1207", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\n[combobox] Search products and services -> TYPE: dayquil\n[button] Submit search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.378, 0.33, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1208", "image": {"bytes": "", "path": "./images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the best odds to win the NBA title.\nPrevious actions:\n[link] NBA . -> HOVER\n[link] Odds . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.306, 0.537, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1209", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Explore Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.171, 0.172, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1210", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.107, 0.286, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1211", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK\n[combobox] To\u00a0 -> TYPE: North Station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.536, 0.229, 0.812, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1212", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.775, 0.192, 0.789] in the image\nAnd my action is Action: TYPE\nValue: 700"}]}, {"id": "mind2web_1213", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK\n[textbox] Last name -> TYPE: Atas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.5, 0.193, 0.503] in the image\nAnd my action is Action: TYPE\nValue: 1000001"}]}, {"id": "mind2web_1214", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch\n[button] smartwatches -> CLICK\n[i] -> CLICK\n[img] Sponsored Ad - SKG V9C Smart Watch for Men Women, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.354, 0.838, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1215", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK\n[link] Gaming Mice -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.104, 0.968, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1216", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK\n[heading] ROSTER -> CLICK\n[heading] Vinicius Junior -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.041, 0.574, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1217", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.725, 0.192, 0.739] in the image\nAnd my action is Action: TYPE\nValue: 70"}]}, {"id": "mind2web_1218", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Hot Dogs\n[span] Hot Dogs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.096, 0.313, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1219", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.433, 0.43, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1220", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.517, 0.048, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1221", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.212, 0.905, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1222", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK\n[link] OPEN 24 Hours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.071, 0.672, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1223", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.456, 0.187, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1224", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK\n[option] US, CA, San Francisco -> CLICK\n[link] Manager, Field Sales - West -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.306, 0.131, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1225", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 1234567890\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.359, 0.94, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1226", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.458, 0.209, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1227", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK\n[link] Oak -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.253, 0.767, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1228", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.004, 0.256, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1229", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.163, 0.3, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1230", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.285, 0.264, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1231", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.323, 0.272, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1232", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin D -> CLICK\n[checkbox] Buy 1, Get 1 Free 33 items, On Sale list 3 items -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.613, 0.501, 0.682, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1233", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.203, 0.662, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1234", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.431, 0.141, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1235", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000\n[textbox] Email Address -> TYPE: RA@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.774, 0.391, 0.785] in the image\nAnd my action is Action: TYPE\nValue: 90001"}]}, {"id": "mind2web_1236", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.304, 0.384, 0.387, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1237", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.054, 0.443, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1238", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings\n[span] diamond stud earrings -> CLICK\n[img] 10k Gold 1 Carat T.W. 
Black Diamond Stud Earrings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.269, 0.931, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1239", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.231, 0.34, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1240", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_ddb033d7-d0ba-4fd3-a207-ab678a2e12eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK\n[input] -> TYPE: 1648926800"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.342, 0.969, 0.374] in the image\nAnd my action is Action: SELECT\nValue: Hindi"}]}, {"id": "mind2web_1241", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK\n[link] Neutral (7) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.388, 0.974, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1242", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.298, 0.375, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1243", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.018, 0.674, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1244", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[gridcell] Sun May 07 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.575, 0.142, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1245", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.08, 0.426, 0.121] in the image\nAnd my action is Action: TYPE\nValue: New York, United States, 100"}]}, {"id": "mind2web_1246", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\n[link] BABY -> HOVER\n[link] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.455, 0.481, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1247", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Chicago Bulls\n[div] Chicago Bulls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.013, 0.601, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1248", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.228, 0.194, 0.25] in the image\nAnd my action is Action: SELECT\nValue: Leaving at"}]}, {"id": "mind2web_1249", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK\n[link] Pets welcome (118) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.116, 0.38, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1250", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Vinyl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.12, 0.898, 0.135] in the image\nAnd my action is Action: SELECT\nValue: Price Lowest"}]}, {"id": "mind2web_1251", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Popular Upcoming -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.507, 0.613, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1252", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK\n[checkbox] 30 March 2023 -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.304, 0.331, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1253", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_73be4bdc-72e9-42d5-b42e-b5f9ae0ab90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[gridcell] Wed Jul 05 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.33, 0.3, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1254", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.241, 0.36, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1255", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.628, 0.32, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1256", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_ad13c7b6-6d05-41fc-a140-cec783e3ca92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK\n[div] Learn More -> CLICK\n[div] Learn More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.668, 0.268, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1257", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.255, 0.848, 0.282] in the image\nAnd my action is Action: SELECT\nValue: Walking"}]}, {"id": "mind2web_1258", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_dd77262b-0947-4de1-b81d-eb2b77b2382a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.264, 0.562, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1259", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.491, 0.359, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1260", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.451, 0.492, 0.488] in the image\nAnd my action is Action: SELECT\nValue: Adult (16-64)"}]}, {"id": "mind2web_1261", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.33, 0.178, 0.373, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1262", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.448, 0.906, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1263", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.026, 0.284, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1264", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.046, 0.81, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1265", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. 
-> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.023, 0.467, 0.029] in the image\nAnd my action is Action: SELECT\nValue: 1 Room"}]}, {"id": "mind2web_1266", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.646, 0.113, 0.657] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1267", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[heading] Used 2000 Ford Mustang GT -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.256, 0.838, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1268", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.193, 0.192, 0.232] in the image\nAnd my action is Action: SELECT\nValue: 4 Guests"}]}, {"id": "mind2web_1269", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.0, 0.519, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1270", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.801, 0.273, 0.815] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1271", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 1.406, 0.153, 1.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1272", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.052, 0.442, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1273", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK\n[span] 8 -> CLICK\n[span] SEARCH FLIGHTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.371, 0.151, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1274", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.048, 0.158, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1275", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK\n[link] Canada -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.184, 0.312, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1276", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.07, 0.257, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1277", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.353, 0.912, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1278", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.193, 0.969, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1279", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.202, 0.734, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1280", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.162, 0.846, 0.21] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_1281", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.249, 0.641, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1282", "image": {"bytes": "", "path": "./images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show popular news which is at number one in comics.\nPrevious actions:\n[button] News -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.237, 0.216, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1283", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.255, 0.84, 0.287] in the image\nAnd my action is Action: TYPE\nValue: BIRMINGHAM"}]}, {"id": "mind2web_1284", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)\n[select] All -> SELECT: Hardback (13,067)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.566, 0.196, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1285", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.011, 0.353, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1286", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.151, 0.852, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1287", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 1.441, 0.226, 1.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1288", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.411, 0.797, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1289", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.39, 0.385, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1290", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.667, 0.691, 0.684] in the image\nAnd my action is Action: TYPE\nValue: Hello World"}]}, {"id": "mind2web_1291", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.112, 0.494, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1292", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[textbox] Search events -> TYPE: pet festival\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.084, 0.581, 0.105] in the image\nAnd my action is Action: TYPE\nValue: portland"}]}, {"id": "mind2web_1293", "image": {"bytes": "", "path": "./images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for upcoming MLB games taking place on Sunday, 3/19.\nPrevious actions:\n[link] MLB . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.118, 0.218, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1294", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.706, 0.179, 0.719] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1295", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.037, 0.036, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1296", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[svg] -> CLICK\n[div] RawElegant.Life -> CLICK\n[div] Envy Yourself With Beauty Makeup MasterClass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.379, 0.575, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1297", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.155, 0.218, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1298", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_3c351c6f-edb4-43b4-89ed-cbabebaf4917.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1299", "image": {"bytes": "", "path": "./images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with information about luggage and what to bring.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.107, 0.286, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1300", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.146, 0.953, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1301", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_1619fb2d-5d4d-45d4-b5af-0d2853002d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.944, 0.96, 0.985] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1302", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.268, 0.491, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1303", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.042, 0.309, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1304", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.323, 0.643, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1305", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_da70fb73-dad9-4999-beb2-e770abc20a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.334, 0.306, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1306", "image": {"bytes": "", "path": "./images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the most popular photos of the Adam A-500 model aircraft.\nPrevious actions:\n[span] Community -> HOVER\n[link] Highest Ranked -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.493, 0.269, 0.519] in the image\nAnd my action is Action: SELECT\nValue: Adam A-500 (twin-piston) (26)"}]}, {"id": "mind2web_1307", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.113, 0.914, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1308", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_ea8a1212-eff4-48b1-9b49-1c10cd79ec35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.273, 0.181, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1309", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.012, 0.1, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1310", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1311", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.282, 0.954, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1312", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.221, 0.487, 0.257] in the image\nAnd my action is Action: TYPE\nValue: Texas city"}]}, {"id": "mind2web_1313", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.528, 0.285, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1314", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana\n[link] Search for \u201cNirvana\u201d -> CLICK\n[link] Nirvana -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.17, 0.171, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1315", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Romantic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.438, 0.772, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1316", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.234, 0.938, 1.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1317", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\n[link] Auto Services -> HOVER\n[span] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.024, 0.564, 0.038] in the image\nAnd my action is Action: TYPE\nValue: CALIFORNIA"}]}, {"id": "mind2web_1318", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.122, 0.819, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1319", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.206, 0.086, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1320", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.625, 0.897, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1321", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.4, 0.691, 0.526, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1322", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: Mexico\n[div] Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.107, 0.964, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1323", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 20 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.388, 0.408, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1324", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.309, 0.773, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1325", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.404, 0.285, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1326", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: SELECT\nValue: 1 Guest"}]}, {"id": "mind2web_1327", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.157, 0.196, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1328", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.413, 0.595, 0.454] in the image\nAnd my action is Action: TYPE\nValue: 10005"}]}, {"id": "mind2web_1329", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.164, 0.851, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1330", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[button] Products -> CLICK\n[label] Kids -> CLICK\n[label] Maternity -> CLICK\n[button] Store type -> CLICK\n[label] Large store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.978, 0.379, 0.997, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1331", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.116, 0.916, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1332", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\n[textbox] Search TV Shows and Movies... 
-> TYPE: Titanic\n[div] Titanic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.209, 0.281, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1333", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.483, 0.159, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1334", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.116, 0.617, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1335", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.567, 0.697, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1336", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.59, 0.47, 0.614] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1337", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK\n[checkbox] PURPLE -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.554, 0.491, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1338", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.476, 0.265, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1339", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? 
-> TYPE: mexico\n[div] Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.007, 0.781, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1340", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.125, 0.048, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1341", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.121, 0.782, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1342", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[searchbox] Search by city... -> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 1.239, 0.661, 1.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1343", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.053, 0.517, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1344", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.203, 0.832, 0.223] in the image\nAnd my action is Action: SELECT\nValue: XL"}]}, {"id": "mind2web_1345", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.008, 0.858, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1346", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings\n[span] diamond stud earrings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.182, 0.385, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1347", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.963, 0.609, 0.99] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1348", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_96c03dff-6653-4a1f-8dc9-e88932cd6e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.011, 0.781, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1349", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing\n[option] vintage clothing -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.069, 0.905, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1350", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.008, 0.211, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1351", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.547, 0.664, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1352", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.26, 0.868, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1353", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.044, 0.615, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1354", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.235, 0.037, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1355", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.069, 0.567, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1356", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK\n[button] Search -> CLICK\n[button] Economy cars 5\u00a0Seats 1 Large bag 1 Small bag From ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.286, 0.523, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1357", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.863, 0.539, 0.899] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1358", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.0, 0.44, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1359", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 2.375, 0.555, 2.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1360", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.744, 0.691, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1361", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.056, 0.085, 0.059] in the image\nAnd my action is Action: TYPE\nValue: Nintendo Switch"}]}, {"id": "mind2web_1362", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK\n[combobox] Vaccination status Vaccination status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.456, 0.897, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1363", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK\n[select] All -> SELECT: Ages 3-5 (31)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.277, 0.196, 0.298] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_1364", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.292, 0.498, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1365", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.423, 0.234, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1366", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.209, 0.494, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1367", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.163, 0.536, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1368", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.116, 0.326, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1369", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.291, 0.472, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1370", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.261, 0.174, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1371", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.242, 0.438, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1372", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_89d607c5-b591-4816-a2cd-068640e4e281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.096, 0.807, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1373", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.179, 0.335, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1374", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Divorce -> CLICK\n[button] Apply Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.276, 0.372, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1375", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 1.274, 0.298, 1.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1376", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.06, 0.482, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1377", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.202, 0.249, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1378", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.462, 0.956, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1379", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.186, 0.705, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1380", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.389, 0.71, 0.433] in the image\nAnd my action is Action: SELECT\nValue: Camry"}]}, {"id": "mind2web_1381", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... -> TYPE: 60173\n[span] Update ZIP code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.23, 0.969, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1382", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.725, 0.653, 0.74] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1383", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK\n[button] Filter by rent -> CLICK\n[span] Hide Filters -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.882, 0.197, 0.969, 0.209] in the image\nAnd my action is Action: SELECT\nValue: Newest"}]}, {"id": "mind2web_1384", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.545, 0.276, 0.574] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1385", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.368, 0.556, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1386", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.646, 0.233, 0.653] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1387", "image": {"bytes": "", "path": "./images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the current standings for the western conference in the NBA and find the top team.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.092, 0.335, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1388", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK\n[button] Brand -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.013, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1389", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.05, 0.246, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1390", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.363, 0.568, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1391", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> TYPE: 12345678"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.374, 0.62, 0.408] in the image\nAnd my action is Action: SELECT\nValue: Friday, April 7"}]}, {"id": "mind2web_1392", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.216, 0.359, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1393", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.415, 0.263, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1394", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.219, 0.079, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1395", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.253, 0.514, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1396", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK\n[Date] FROM -> TYPE: 04/01/2023\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.185, 0.645, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1397", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.569, 0.399, 0.7, 0.433] in the image\nAnd my action is Action: SELECT\nValue: Under 1"}]}, {"id": "mind2web_1398", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.231, 0.407, 0.257] in the image\nAnd my action is Action: SELECT\nValue: Jeep"}]}, {"id": "mind2web_1399", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.212, 0.359, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1400", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.78, 0.073, 0.789] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1401", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK\n[combobox] Search for a line -> TYPE: Oyster Bay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.23, 0.433, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1402", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.234, 0.4, 0.257] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_1403", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.558, 0.179, 0.645, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_1404", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.797, 0.988, 0.804] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1405", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.346, 0.28, 0.386] in the image\nAnd my action is Action: TYPE\nValue: Abidjan"}]}, {"id": "mind2web_1406", "image": {"bytes": "", "path": "./images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find details on converting points to miles.\nPrevious actions:\n[link] Redeem Points \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 1.048, 0.796, 1.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1407", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.419, 0.84, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1408", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.006, 0.887, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1409", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.193, 0.375, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1410", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.309, 0.156, 0.326] in the image\nAnd my action is Action: TYPE\nValue: 150"}]}, {"id": "mind2web_1411", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.215, 0.048, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1412", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.311, 0.569, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1413", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.134, 0.925, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1414", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.053, 0.954, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1415", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.79, 0.266, 0.805] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1416", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c73c1b00-3a34-4287-8884-327b234c2dfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.133, 0.705, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1417", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.294, 0.286, 0.306] in the image\nAnd my action is Action: SELECT\nValue: Good To Go"}]}, {"id": "mind2web_1418", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.143, 0.769, 0.179] in the image\nAnd my action is Action: TYPE\nValue: Anderson"}]}, {"id": "mind2web_1419", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.293, 0.709, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1420", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK\n[button] Date -> CLICK\n[button] 05/05/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.462, 0.837, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1421", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.872, 0.789, 0.886] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1422", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago\n[option] Chicago, IL -> CLICK\n[button] See next Categories -> CLICK\n[img] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.956, 0.158, 0.976] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1423", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.676, 0.166, 0.818, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1424", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.378, 0.243, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1425", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.011, 0.988, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1426", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.132, 0.965, 0.158] in the image\nAnd my action is Action: SELECT\nValue: Low to High"}]}, {"id": "mind2web_1427", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_b9e530f7-a586-48bf-8e6d-99f59bf306d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[textbox] Monthly Start Date -> CLICK\n[gridcell] Sat Apr 22 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.308, 0.3, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1428", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... 
-> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.427, 0.315, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1429", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK\n[button] Open More Dropdown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.115, 0.775, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1430", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[textbox] Registration Zip Code Where you will register the ... -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.228, 0.282, 0.25] in the image\nAnd my action is Action: SELECT\nValue: 200 miles"}]}, {"id": "mind2web_1431", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK\n[link] East Village (9) -> CLICK\n[link] $16 To $30 (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.556, 0.603, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1432", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.365, 0.036, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1433", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 2.799, 0.037, 2.825] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1434", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.179, 0.294, 0.198] in the image\nAnd my action is Action: TYPE\nValue: New york knicks"}]}, {"id": "mind2web_1435", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.105, 0.971, 0.125] in the image\nAnd my action is Action: TYPE\nValue: 60173"}]}, {"id": "mind2web_1436", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.599, 0.488, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1437", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.155, 0.713, 0.171] in the image\nAnd my action is Action: TYPE\nValue: bournemouth"}]}, {"id": "mind2web_1438", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.085, 0.205, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1439", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK\n[div] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.162, 0.975, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1440", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. 
Black exterior -> TYPE: black"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.466, 0.261, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1441", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.295, 0.291, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1442", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK\n[link] Filter -> CLICK\n[checkbox] Shop Pre-Orders Shop Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.03, 0.378, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1443", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.258, 0.384, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1444", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.861, 0.162, 0.98, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1445", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.399, 0.336, 0.432] in the image\nAnd my action is Action: TYPE\nValue: 7"}]}, {"id": "mind2web_1446", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK\n[searchbox] Enter ZIP or State -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 07055\n[button] Search for CarMax stores. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.287, 0.709, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1447", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_0c62b3b9-463e-44c1-a5db-b91ffde052e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.261, 0.367, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1448", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.253, 0.484, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1449", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.72, 0.056, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1450", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\n[combobox] Search query -> TYPE: lebron james\n[img] LeBron James -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.36, 0.186, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1451", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 1.678, 0.087, 1.688] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1452", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.319, 0.196, 0.34] in the image\nAnd my action is Action: SELECT\nValue: Spanish (42)"}]}, {"id": "mind2web_1453", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_ba3a8dc1-ddd6-4c96-9215-fa4470fc1329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] INVENTORY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.214, 0.804, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1454", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. 
New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.593, 0.024, 0.605, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1455", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.186, 0.478, 0.232] in the image\nAnd my action is Action: TYPE\nValue: Santa Fe"}]}, {"id": "mind2web_1456", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.805, 0.038, 0.817] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1457", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: kinect camera"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.031, 0.228, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1458", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_47640ee0-514b-4bc9-85b9-e6000e0cfc0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK\n[gridcell] April 10, 2023 -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.737, 0.685, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1459", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.417, 0.645, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1460", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_a67f3f29-e6ce-45fc-8672-47d8b5b4988b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK\n[tab] Character -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 9.57, 0.443, 9.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1461", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.118, 0.525, 0.132] in the image\nAnd my action is Action: TYPE\nValue: George Clooney"}]}, {"id": "mind2web_1462", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK\n[label] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.447, 0.791, 0.469] in the image\nAnd my action is Action: SELECT\nValue: Wish List"}]}, {"id": "mind2web_1463", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.079, 0.211, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1464", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.177, 0.388, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1465", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[path] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.239, 0.83, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1466", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.526, 0.916, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1467", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 0.006, 0.434, 0.014] in the image\nAnd my action is Action: TYPE\nValue: indian"}]}, {"id": "mind2web_1468", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK\n[link] {{ 'see_more_label' | translate }} {{::list.info.n... 
-> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.27, 0.383, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1469", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_30bc3786-daf3-4ec9-a9ff-37b8f6c57ae0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.43, 0.379, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1470", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.221, 0.769, 0.257] in the image\nAnd my action is Action: TYPE\nValue: Texas"}]}, {"id": "mind2web_1471", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.329, 0.249, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1472", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Activity -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.281, 0.154, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1473", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.1, 0.367, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1474", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.247, 0.123, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1475", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.082, 0.961, 0.11] in the image\nAnd my action is Action: TYPE\nValue: 60173"}]}, {"id": "mind2web_1476", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.661, 0.48, 0.688] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1477", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.339, 0.375, 0.352] in the image\nAnd my action is Action: SELECT\nValue: New"}]}, {"id": "mind2web_1478", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.03, 0.456, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1479", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.07, 0.42, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1480", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK\n[button] SET AS MY STORE -> CLICK\n[link] SHOP LOCAL CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.13, 0.288, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1481", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.157, 0.181, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1482", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.182, 0.573, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1483", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK\n[heading] ROSTER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 1.662, 0.587, 1.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1484", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK\n[textbox] City, State or Zip -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.905, 0.108, 0.938, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1485", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.008, 0.262, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1486", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.176, 0.379, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1487", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.27, 0.304, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1488", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\n[button] Camp & Hike -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.359, 0.543, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1489", "image": {"bytes": "", "path": "./images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of HomePod mini\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.0, 0.737, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1490", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK\n[textbox] Departure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.673, 0.777, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1491", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... 
-> TYPE: 12345678\n[textbox] First name -> TYPE: Jason"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.255, 0.726, 0.28] in the image\nAnd my action is Action: TYPE\nValue: Two"}]}, {"id": "mind2web_1492", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_159c7b17-5f58-4a88-bc18-07362dc1987e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK\n[span] From $62 -> CLICK\n[button] Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.32, 0.198, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1493", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.724, 0.326, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1494", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[heading] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.345, 0.63, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1495", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b583686e-09eb-48e7-9bcb-65faa05d92cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.734, 0.49, 0.908, 0.518] in the image\nAnd my action is Action: TYPE\nValue: 888888888"}]}, {"id": "mind2web_1496", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.244, 0.894, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1497", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.295, 0.233, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1498", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.261, 0.939, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1499", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.201, 0.573, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1500", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.138, 0.326, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1501", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_5bbdade7-b345-4703-964f-99ff3ae7385c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. 
The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.536, 0.336, 0.548] in the image\nAnd my action is Action: TYPE\nValue: 123st rd"}]}, {"id": "mind2web_1502", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.075, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1503", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK\n[link] Shop red -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.334, 0.459, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1504", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK\n[textbox] Search IMDb -> TYPE: The Magician's Elephant\n[div] The Magician's Elephant -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.434, 0.942, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1505", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.379, 0.166, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1506", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.019, 0.981, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1507", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK\n[button] OK, got it -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.192, 0.925, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1508", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. 
-> TYPE: doha"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.483, 0.498, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1509", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.218, 0.214, 0.245] in the image\nAnd my action is Action: TYPE\nValue: 60538"}]}, {"id": "mind2web_1510", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.143, 0.957, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1511", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.235, 0.655, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1512", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: Glasgow"}]}, {"id": "mind2web_1513", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 2.882, 0.906, 2.91] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1514", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.039, 0.917, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1515", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.205, 1.0, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1516", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.09, 0.402, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1517", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.858, 0.059, 0.889, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1518", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 1.909, 0.277, 1.924] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1519", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.042, 0.673, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1520", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_77889bbd-2782-42c1-9514-0f31846074cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.612, 0.922, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1521", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.183, 0.629, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1522", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.833, 0.027, 1.845] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1523", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.3, 0.582, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1524", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.227, 0.712, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1525", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.963, 0.158, 0.972] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1526", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> CLICK\n[textbox] max price $ -> TYPE: 99"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.64, 0.783, 0.786, 0.822] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1527", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.358, 0.619, 0.388] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_1528", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.433, 0.284, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1529", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.106, 0.339, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1530", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.521, 0.958, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1531", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.066, 1.051, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1532", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.462, 0.344, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1533", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 1.279, 0.415, 1.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1534", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.017, 0.564, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1535", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.032, 0.335, 0.051] in the image\nAnd my action is Action: TYPE\nValue: barbershop"}]}, {"id": "mind2web_1536", "image": {"bytes": "", "path": "./images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for the newsletter\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 2.501, 0.337, 2.522] in the image\nAnd my action is Action: TYPE\nValue: larryknox@gmail.com"}]}, {"id": "mind2web_1537", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK\n[textbox] search -> TYPE: Dota 2\n[link] Dota 2 Free -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.485, 0.863, 0.611, 0.882] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1538", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK\n[heading] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.262, 0.388, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1539", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.122, 0.902, 0.158] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_1540", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_67e9ecfe-bf95-42a4-aabf-a684323a69c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. 
-> TYPE: bhz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.242, 0.393, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1541", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.211, 0.943, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1542", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.249, 0.743, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1543", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.417, 0.517, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1544", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.053, 0.737, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1545", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.57, 0.311, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1546", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.085, 0.38, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1547", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.534, 0.233, 0.574] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1548", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.23, 0.359, 0.255] in the image\nAnd my action is Action: TYPE\nValue: Greenport"}]}, {"id": "mind2web_1549", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.359, 0.736, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1550", "image": {"bytes": "", "path": "./images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the design tool for a new home office.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.048, 0.501, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1551", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.51, 0.238, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1552", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.595, 0.158, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1553", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.003, 0.204, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1554", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK\n[link] Showtimes -> CLICK\n[heading] John Wick: Chapter 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.092, 0.134, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1555", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.11, 0.968, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1556", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.018, 0.35, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1557", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.525, 0.307, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1558", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.028, 0.335, 0.045] in the image\nAnd my action is Action: TYPE\nValue: cafe"}]}, {"id": "mind2web_1559", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK\n[combobox] Drop off time Selected 10:00 a.m. -> CLICK\n[option] 12:00 p.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.67, 0.484, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1560", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.006, 0.613, 0.021] in the image\nAnd my action is Action: TYPE\nValue: The Magician's Elephant"}]}, {"id": "mind2web_1561", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.327, 0.421, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1562", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ae9b2eda-9e42-4a8f-a07e-abaad1212cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_1563", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.39, 0.099, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1564", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_94539249-ecc8-4133-9890-519b2ea618e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: TYPE\nValue: AUCKLAND"}]}, {"id": "mind2web_1565", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? 
-> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.579, 0.339, 0.607, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1566", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.265, 0.931, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1567", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.066, 0.628, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1568", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: Under US$20\n[select] All -> SELECT: Hardback"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.397, 0.196, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1569", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.195, 0.103, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1570", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK\n[textbox] First Name * (required) -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.269, 0.573, 0.3] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_1571", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\n[link] Size Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.307, 0.665, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1572", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.317, 0.216, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1573", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK\n[button] APPLY -> CLICK\n[link] ADD TO RFP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.946, 0.96, 0.979] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1574", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] 10+ Night Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.41, 0.306, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1575", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.26, 0.048, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1576", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.355, 0.691, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1577", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.115, 0.617, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1578", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... -> TYPE: 60173"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.142, 0.972, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1579", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6bdf3560-322b-4a2d-800a-74c3e8f62dc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.095, 0.258, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1580", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.303, 0.325, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1581", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.213, 0.359, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1582", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.225, 0.312, 0.243] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_1583", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.012, 0.323, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1584", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.202, 0.971, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1585", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[button] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.953, 0.642, 0.997] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1586", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.2, 0.238, 0.233] in the image\nAnd my action is Action: TYPE\nValue: Cheyenne"}]}, {"id": "mind2web_1587", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.009, 0.211, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1588", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.415, 0.207, 0.442] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_1589", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 1.491, 0.509, 1.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1590", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.366, 0.253, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1591", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.18, 0.349, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1592", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 2.093, 0.095, 2.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1593", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.054, 0.35, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1594", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Lebron James\n[div] LeBron James -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.013, 0.574, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1595", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.276, 0.405, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1596", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.359, 0.736, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1597", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.693, 0.285, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1598", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? 
-> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.164, 0.927, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1599", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.395, 0.52, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1600", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.222, 0.688, 0.241] in the image\nAnd my action is Action: TYPE\nValue: New Yok"}]}, {"id": "mind2web_1601", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.9, 0.179, 0.923] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1602", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.153, 0.265, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1603", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... 
-> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.197, 0.797, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1604", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.817, 0.434, 0.943, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1605", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.075, 0.264, 0.086] in the image\nAnd my action is Action: TYPE\nValue: cough medicine"}]}, {"id": "mind2web_1606", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_e11ffb02-f80c-4113-90d9-a7a3fc334da9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1607", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.706, 0.253, 0.738] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1608", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_5c6956c9-6868-4100-809d-2d60c8266d39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\n[link] BUY TICKETS -> CLICK\n[span] -> CLICK\n[label] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.927, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1609", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK\n[button] View upcoming departures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.19, 0.497, 0.213] in the image\nAnd my action is Action: SELECT\nValue: SOUTHBOUND Forest Hills"}]}, {"id": "mind2web_1610", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_eb9e2f08-e45e-4152-b15c-68af8e163e11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.243, 0.732, 0.286] in the image\nAnd my action is Action: TYPE\nValue: Nemo Front Porch 2P Tent"}]}, {"id": "mind2web_1611", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.183, 0.75, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1612", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.476, 0.331, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1613", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.276, 0.795, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1614", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.271, 0.828, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1615", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.985, 0.276, 1.001] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1616", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.663, 0.204, 0.678] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1617", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)\n[select] All -> SELECT: Hardback (13,067)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.375, 0.196, 0.395] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_1618", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK\n[textbox] Enter First name, last name, and/or username: -> TYPE: Joe Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.617, 0.146, 0.645, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1619", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.02, 0.67, 0.037] in the image\nAnd my action is Action: TYPE\nValue: mirror"}]}, {"id": "mind2web_1620", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK\n[button] $56,000 + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.606, 0.24, 0.632] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1621", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 1.881, 0.945, 1.903] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1622", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.313, 0.709, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1623", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.134, 0.782, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1624", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.253, 0.923, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1625", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.879, 0.263, 1.889] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1626", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.461, 0.354, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1627", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.398, 0.243, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1628", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.812, 0.182, 0.827] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1629", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.321, 0.471, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1630", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.145, 0.657, 0.171] in the image\nAnd my action is Action: TYPE\nValue: laguardia airport"}]}, {"id": "mind2web_1631", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.307, 0.504, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1632", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 1.432, 0.209, 1.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1633", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. 
Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.308, 0.118, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1634", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK\n[link] Football -> CLICK\n[link] Men's UA Football All Over Print Metal Logo Short ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.499, 0.952, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1635", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_ea2a29f1-6848-4ffe-a130-49eb73e01e99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.015, 0.981, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1636", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.702, 0.372, 0.728] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1637", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_5307a9cc-3824-4f21-ba80-86eb1dcab306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.345, 0.486, 0.371] in the image\nAnd my action is Action: TYPE\nValue: BOSTON LEGAL"}]}, {"id": "mind2web_1638", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.541, 0.249, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1639", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_c3012402-7d2b-49f8-8a3f-4a6ff36ca6a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.172, 0.886, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1640", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.082, 0.828, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1641", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK\n[textbox] price to -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 1.264, 0.192, 1.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1642", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[div] Calendar -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.406, 0.09, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1643", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.484, 0.708, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1644", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.258, 0.552, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1645", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_b41d47a0-2af6-4068-877c-5fecb6b8b45e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.191, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1646", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK\n[button] Discount -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.851, 0.988, 0.887] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1647", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK\n[span] 5 -> CLICK\n[span] 15 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.066, 0.914, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1648", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.207, 0.641, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1649", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)\n[select] All -> SELECT: Digital\n[select] All -> SELECT: French (299)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.467, 0.196, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1650", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.267, 0.891, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1651", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_e686e478-89e5-4245-8f6b-9066b3cfcd46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.202, 0.803, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1652", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.569, 0.47, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1653", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a65bd23a-d4e7-404d-b5f9-a18afdcd9516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.273, 0.181, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1654", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.588, 0.194, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1655", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_4c7b210f-a952-4105-a305-666b67c4413d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1656", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.265, 0.156, 0.283] in the image\nAnd my action is Action: TYPE\nValue: creed III"}]}, {"id": "mind2web_1657", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_a2c937ff-c9d4-49ec-8481-f6c7292f0a27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK\n[i] -> CLICK\n[button] Apply Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.474, 0.977, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1658", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.666, 0.29, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1659", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.32, 0.234, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1660", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.006, 0.491, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1661", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.266, 0.347, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1662", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 1.035, 0.284, 1.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1663", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.3, 0.37, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1664", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)\n[select] All -> SELECT: Digital"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.373, 0.196, 0.398] in the image\nAnd my action is Action: SELECT\nValue: French (299)"}]}, {"id": "mind2web_1665", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Supported Devices -> CLICK\n[generic] FREE LIVE TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.537, 0.273, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1666", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.811, 0.491, 0.84] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1667", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_29b304e8-8ac3-4dca-a084-e2a2b157d560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.006, 0.467, 0.041] in the image\nAnd my action is Action: TYPE\nValue: gorillaz"}]}, {"id": "mind2web_1668", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.159, 0.041, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1669", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.54, 0.354, 0.568] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1670", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.253, 0.328, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1671", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8\n[select] 00 -> SELECT: 37"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.797, 0.206, 0.854, 0.226] in the image\nAnd my action is Action: SELECT\nValue: AM"}]}, {"id": "mind2web_1672", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.366, 0.393, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1673", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_cc6fc299-2804-4e5d-88b7-816f63bf8bd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.174, 0.418, 0.209] in the image\nAnd my action is Action: TYPE\nValue: Hackney"}]}, {"id": "mind2web_1674", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_1675", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.794, 0.082, 0.806] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1676", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.101, 0.249, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1677", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.047, 0.159, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1678", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.355, 0.855, 0.401] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_1679", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK\n[link] Bridges & Tunnels -> CLICK\n[div] Tolls by vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.535, 0.367, 0.619] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1680", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK\n[span] Traveler Rating -> CLICK\n[img] 4.5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.113, 0.159, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1681", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[span] Westminster -> CLICK\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.456, 0.529, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1682", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen\n[spinbutton] Main item quantity -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.224, 0.96, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1683", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.255, 0.84, 0.287] in the image\nAnd my action is Action: TYPE\nValue: New York JFK"}]}, {"id": "mind2web_1684", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] X5 (87) X5 (87) -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.104, 1.002, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1685", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_f94749d5-d311-4026-8036-c81f05ec38e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.229, 0.326, 0.268] in the image\nAnd my action is Action: TYPE\nValue: Hackney Clothes Swap - Earth Day"}]}, {"id": "mind2web_1686", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.179, 0.446, 0.198] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_1687", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.407, 0.287, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1688", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... 
-> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.289, 0.432, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1689", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.264, 0.782, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1690", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.344, 0.111, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1691", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.348, 0.413, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1692", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK\n[link] See Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.5, 0.443, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1693", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.173, 0.963, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1694", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.057, 0.966, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1695", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.311, 0.781, 0.325] in the image\nAnd my action is Action: SELECT\nValue: Price"}]}, {"id": "mind2web_1696", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.626, 0.339, 0.644] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1697", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.203, 0.362, 0.256] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_1698", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.278, 0.56, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1699", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\n[link] Health -> CLICK\n[heading] Most popular events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.18, 0.153, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1700", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.2, 0.359, 0.219] in the image\nAnd my action is Action: TYPE\nValue: 74th street, brooklyn"}]}, {"id": "mind2web_1701", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.343, 0.311, 0.37] in the image\nAnd my action is Action: SELECT\nValue: 18-25"}]}, {"id": "mind2web_1702", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.39, 0.863, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1703", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.401, 0.16, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1704", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.401, 0.263, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1705", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_ee36e538-a5c9-46f4-a19d-7fb9f3fc9b3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.568, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1706", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.135, 0.196, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1707", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.27, 0.137, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1708", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.134, 0.38, 0.167] in the image\nAnd my action is Action: TYPE\nValue: Miami Airport"}]}, {"id": "mind2web_1709", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.159, 0.568, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Seattle"}]}, {"id": "mind2web_1710", "image": {"bytes": "", "path": "./images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the results of the most recent NFL games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.092, 0.286, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1711", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_d7618269-d378-4b6e-9124-9f8558d304d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.7, 0.353, 0.737] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_1712", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.481, 0.184, 0.488] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_1713", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.364, 0.249, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1714", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.392, 0.285, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1715", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.16, 0.354, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1716", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.629, 0.307, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1717", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_89339d95-1d28-4d8e-bd50-7014518d77f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.204, 0.754, 0.219] in the image\nAnd my action is Action: TYPE\nValue: MOMA"}]}, {"id": "mind2web_1718", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.287, 0.571, 0.315] in the image\nAnd my action is Action: TYPE\nValue: 1HGCM66543A064159"}]}, {"id": "mind2web_1719", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_24f44f71-9d71-4647-a587-f4f33b5b3fb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.613, 0.465, 0.694, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1720", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0d7ecbf2-34f6-44ad-8a78-48c3102e5df2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: 5456165184"}]}, {"id": "mind2web_1721", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.207, 0.641, 0.243] in the image\nAnd my action is Action: TYPE\nValue: Miami, FL"}]}, {"id": "mind2web_1722", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.43, 0.745, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1723", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.95, 0.192, 0.962] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1724", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.37, 0.187, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1725", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK\n[link] All Vehicles (13) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.189, 0.929, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1726", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.214, 0.748, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1727", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.158, 0.849, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1728", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.292, 0.047, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1729", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? 
-> TYPE: TOKYO\n[span] TYO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.221, 0.666, 0.254] in the image\nAnd my action is Action: TYPE\nValue: NEW DELHI"}]}, {"id": "mind2web_1730", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 1.118, 0.3, 1.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1731", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 1.071, 0.231, 1.083] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_1732", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.289, 0.447, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1733", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.823, 0.092, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1734", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.125, 0.181, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1735", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK\n[img] movie poster for Elvis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.175, 0.177, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1736", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK\n[textbox] Drop-off location -> TYPE: Miami\n[span] Miami, Florida, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.17, 0.963, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1737", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] New -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[img] Woods 41366 Surge Protector with Overload Safety F... -> CLICK\n[span] Qty: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.311, 0.857, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1738", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[button] Back -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.597, 0.193, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1739", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.366, 0.192, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1740", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> CLICK\n[spinbutton] Flight number , required. 
-> TYPE: 6944\n[combobox] Date -> SELECT: Thursday, April 6"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.435, 0.875, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1741", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.326, 0.29, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1742", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.868, 0.325, 0.878] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1743", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.071, 0.566, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1744", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.257, 0.274, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1745", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.359, 0.157, 1.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1746", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.182, 0.881, 0.198] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_1747", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.318, 0.655, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1748", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.104, 0.266, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1749", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_0511e7ae-896b-464b-b6ce-185a2db5c887.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.096, 0.718, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1750", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.784, 0.266, 0.806] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1751", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.493, 0.218, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1752", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.037, 0.42, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1753", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK\n[button] Go! -> CLICK\n[link] Planea Tu Visita \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.183, 0.967, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1754", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Orange Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.124, 0.281, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1755", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI\n[option] Airport Chhatrapati Shivaji Maharaj International ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.262, 0.764, 0.276] in the image\nAnd my action is Action: TYPE\nValue: NEW DELHI"}]}, {"id": "mind2web_1756", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.484, 0.677, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1757", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_c58aefeb-4d48-4653-a97d-b5958b12472a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.255, 0.488, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1758", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Tsiakkos & Charcoal"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.103, 0.691, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1759", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.327, 0.053, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1760", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.092, 0.181, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1761", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.208, 0.36, 0.457, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1762", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... 
-> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.227, 0.359, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1763", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_41532c32-73da-4df8-83e0-f50f2f3c9baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[button] Choose your room -> CLICK\n[button] Book Business Double Room A -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[button] Choose -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.626, 0.962, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1764", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.28, 0.159, 0.303] in the image\nAnd my action is Action: SELECT\nValue: 3 Tickets"}]}, {"id": "mind2web_1765", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.072, 0.559, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1766", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_1767", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.484, 0.182, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1768", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.297, 0.689, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1769", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.251, 0.592, 0.276] in the image\nAnd my action is Action: TYPE\nValue: Disneyland"}]}, {"id": "mind2web_1770", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.811, 0.492, 0.821] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1771", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[option] Airport Chhatrapati Shivaji Maharaj International ... -> CLICK\n[combobox] Flying to -> TYPE: NEW DELHI\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.587, 0.381, 0.591, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1772", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.309, 0.777, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1773", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.085, 0.134, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1774", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.247, 0.557, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1775", "image": {"bytes": "", "path": "./images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ground operations jobs with JetBlue.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 3.134, 0.183, 3.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1776", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.566, 0.478, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1777", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.389, 0.249, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1778", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.016] in the image\nAnd my action is Action: TYPE\nValue: Nirvana"}]}, {"id": "mind2web_1779", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Thu Aug 10 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.263, 0.957, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1780", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.017, 0.775, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1781", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.889, 0.504, 0.948, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1782", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.673, 0.634, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1783", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 1.145, 0.284, 1.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1784", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.373, 0.494, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1785", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK\n[button] 10:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.741, 0.715, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1786", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.109, 0.791, 0.144] in the image\nAnd my action is Action: TYPE\nValue: Membership tier"}]}, {"id": "mind2web_1787", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[b] Paris -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.386, 0.743, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1788", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK\n[div] 27 -> CLICK\n[div] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.935, 0.149, 0.977, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1789", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... 
-> CLICK\n[link] BATH TOWEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.413, 0.931, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1790", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.416, 0.421, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1791", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.275, 0.537, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1792", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.915, 0.285, 0.948] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1793", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\n[textbox] Where to? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.156, 0.359, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1794", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.514, 0.404, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1795", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.007, 0.935, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1796", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK\n[span] Virginia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.375, 0.331, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1797", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.33, 0.287, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1798", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Europe"}]}, {"id": "mind2web_1799", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.395, 0.574, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1800", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.245, 0.35, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1801", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.212, 0.291, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1802", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK\n[checkbox] ORANGE -> HOVER\n[checkbox] L -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.655, 0.906, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1803", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.361, 0.829, 0.372] in the image\nAnd my action is Action: TYPE\nValue: bali"}]}, {"id": "mind2web_1804", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.366, 0.266, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1805", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.338, 0.409, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1806", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. -> TYPE: washington\n[a] WAS - Washington, DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.277, 0.875, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1807", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.196, 0.474, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1808", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.591, 0.123, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1809", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK\n[textbox] e.g.: New York -> TYPE: Membership tier"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.109, 0.877, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1810", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.044, 0.153, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1811", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.276, 0.374, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1812", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_c67ea018-cdc3-44b1-aec9-1dd781811f05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.384, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1813", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Hiking Footwear -> CLICK\n[link] add filter: Waterproof(456) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 2.422, 0.208, 2.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1814", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: electrician\n[span] Electrician -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.078, 0.257, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1815", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.254, 0.823, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1816", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.388, 0.365, 0.409] in the image\nAnd my action is Action: TYPE\nValue: 12345678"}]}, {"id": "mind2web_1817", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.454, 0.161, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1818", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.826, 0.14, 0.931, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1819", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.017, 0.891, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1820", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK\n[combobox] Search for a line -> TYPE: Oyster Bay\n[span] Oyster -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.349, 0.344, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1821", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.387, 0.95, 0.421] in the image\nAnd my action is Action: SELECT\nValue: MasterCard"}]}, {"id": "mind2web_1822", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.885, 0.158, 0.898] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1823", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.292, 0.5, 0.321] in the image\nAnd my action is Action: TYPE\nValue: XA1234"}]}, {"id": "mind2web_1824", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. -> CLICK\n[combobox] Guests -> SELECT: 3 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.594, 0.235, 0.616] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1825", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.552, 0.218, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1826", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.194, 0.226, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1827", "image": {"bytes": "", "path": "./images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me Ed Sheeran Chords & Tabs\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: Ed Sheeran"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.067, 0.897, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1828", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.225, 0.837, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1829", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.16, 0.266, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1830", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.478, 0.238, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1831", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock\n[button] Search -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.255, 0.47, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1832", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.005, 0.867, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1833", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.058, 0.661, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1834", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.144, 0.957, 0.156, 0.971] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1835", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.393, 0.658, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1836", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.728, 0.146, 0.845, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1837", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.331, 0.802, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1838", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 2.552, 0.945, 2.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1839", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK\n[button] Seattle -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.063, 0.576, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1840", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.179, 0.554, 0.2] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_1841", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.129, 0.374, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1842", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.149, 0.272, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1843", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.471, 0.664, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1844", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 14"}]}, {"id": "mind2web_1845", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.221, 0.187, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1846", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.453, 0.316, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1847", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK\n[div] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.245, 0.311, 0.343, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1848", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.62, 0.956, 0.642] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1849", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_ae470708-5a6a-454a-a56e-1bacceda7eea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.545, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1850", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.162, 0.552, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1851", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.447, 0.846, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1852", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.029, 0.553, 0.052] in the image\nAnd my action is Action: TYPE\nValue: skirt"}]}, {"id": "mind2web_1853", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... -> CLICK\n[combobox] Enter a location -> TYPE: north station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.363, 0.679, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1854", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK\n[link] Dining Sets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.83, 0.475, 0.933, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1855", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.122, 0.56, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1856", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: red sox vs yankees\n[button] Search -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.771, 0.214, 0.799, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1857", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.383, 0.394, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1858", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.053, 0.356, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1859", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.117, 0.574, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1860", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: buenos aires\n[button] Buenos Aires, AR (EZE - Ministro Pistarini) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.35, 0.246, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1861", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.126, 0.353, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1862", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] 6 - 9 Days -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.12, 0.416, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1863", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.094, 0.39, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1864", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.162, 0.471, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1865", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.265, 0.362, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1866", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.027, 0.164, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1867", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.366, 0.212, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1868", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.35, 0.429, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1869", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.305, 0.479, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1870", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.125, 0.395, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1871", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.026, 0.128, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1872", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.785, 0.316, 0.822] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1873", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK\n[link] GLAMPING \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 1.007, 0.324, 1.01] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1874", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.0, 0.354, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1875", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.205, 0.586, 0.248] in the image\nAnd my action is Action: TYPE\nValue: Stanford, CA"}]}, {"id": "mind2web_1876", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK\n[listbox] Select railcard 01 -> SELECT: Veterans Railcard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.539, 0.391, 0.571] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1877", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.247, 0.62, 0.26] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_1878", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[textbox] Enter zip code to save preferred delivery location... 
-> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.322, 0.454, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1879", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_c03afb1c-7dc5-4f54-bad0-e9361412ba27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: TYPE\nValue: Harry Reid Intl Airport, LAS"}]}, {"id": "mind2web_1880", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.324, 0.286, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1881", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.354, 0.584, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1882", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK\n[link] Find Schedules -> CLICK\n[div] Earlier -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.495, 0.857, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1883", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_cf712776-65b6-42ad-851a-6f37fcb94caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.373, 0.379, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1884", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK\n[button] 10:00 -> CLICK\n[button] 10:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.179, 0.953, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1885", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.336, 0.21, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1886", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.321, 0.322, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1887", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.194, 0.32, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1888", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.351, 0.84, 0.383] in the image\nAnd my action is Action: TYPE\nValue: MONTGOM"}]}, {"id": "mind2web_1889", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.055, 0.266, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1890", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.499, 0.359, 0.628] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1891", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.285, 0.923, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1892", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.338, 0.728, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1893", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.331, 0.727, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1894", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.036, 0.646, 0.049] in the image\nAnd my action is Action: TYPE\nValue: trade in"}]}, {"id": "mind2web_1895", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.565, 0.233, 0.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1896", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.025, 0.335, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1897", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_b45f21d0-bde6-46c9-a618-7710e5efa2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1898", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.636, 0.474, 0.647] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1899", "image": {"bytes": "", "path": "./images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information on how to find lost AirPods.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.0, 0.799, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1900", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.379, 0.956, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1901", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.184, 0.376, 0.228] in the image\nAnd my action is Action: TYPE\nValue: Mumbai"}]}, {"id": "mind2web_1902", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.241, 0.853, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1903", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.124, 0.196, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1904", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.023, 0.054, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1905", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.326, 0.908, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1906", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.12, 0.969, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1907", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.037] in the image\nAnd my action is Action: TYPE\nValue: wireless keyboard"}]}, {"id": "mind2web_1908", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK\n[link] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_1909", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.478, 0.83, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1910", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 1.575, 0.19, 1.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1911", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1912", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.491, 0.966, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1913", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK\n[checkbox] 4 stars rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.125, 0.237, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1914", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK\n[spinbutton] Enter Minimum Price -> TYPE: 0\n[spinbutton] Enter Maximum Price -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 1.034, 0.169, 1.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1915", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.341, 0.197, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1916", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK\n[checkbox] RV Site -> CLICK\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: California"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.309, 0.316, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1917", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: los angeles kings\n[option] Los Angeles Kings -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.168, 0.881, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1918", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.281, 0.929, 0.3] in the image\nAnd my action is Action: TYPE\nValue: April May"}]}, {"id": "mind2web_1919", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... -> CLICK\n[combobox] Departing from -> TYPE: ATLANTA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.152, 0.96, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1920", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.05, 0.523, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1921", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.038, 0.646, 0.051] in the image\nAnd my action is Action: TYPE\nValue: nba2k23"}]}, {"id": "mind2web_1922", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.581, 0.047, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1923", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.221, 0.422, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1924", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Avatar The Way of Water"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.082, 0.594, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1925", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.283, 0.048, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1926", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK\n[button] Add to basket -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.401, 0.517, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1927", "image": {"bytes": "", "path": "./images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for upcoming MLB games taking place on Sunday, 3/19.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.092, 0.384, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1928", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.144, 0.369, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1929", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK\n[div] Sat, Mar 25 -> CLICK\n[checkbox] 25 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.11, 0.923, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1930", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK\n[link] Choose Another Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.502, 0.968, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1931", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.304, 0.123, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1932", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.073, 0.266, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1933", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. 
-> CLICK\n[button] Saturday, April 29, 2023 -> CLICK\n[button] Update -> CLICK\n[link] Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.387, 0.168, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1934", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.239, 0.359, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1935", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_cddfab15-2683-475d-bf18-73b8f9e9d08b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.044, 0.256, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1936", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.322, 0.055, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1937", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.143, 0.486, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1938", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.274, 0.038, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1939", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.215, 0.13, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1940", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.042, 0.809, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1941", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[link] Get tickets -> CLICK\n[p] Number of Travellers -> CLICK\n[img] -> CLICK\n[button] Check availability -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.508, 0.496, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1942", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\n[link] Navigate to on-demand -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.163, 0.236, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1943", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.564, 0.254, 0.597] in the image\nAnd my action is Action: TYPE\nValue: 10000002"}]}, {"id": "mind2web_1944", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.218, 0.521, 0.24] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_1945", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_e250ff6f-8015-4511-9c8c-e97988aa1f87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.078, 0.763, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1946", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.393, 0.234, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1947", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.027, 0.348, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1948", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.024, 0.652, 0.043] in the image\nAnd my action is Action: TYPE\nValue: washing machine"}]}, {"id": "mind2web_1949", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.455, 0.492, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1950", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york knicks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.218, 0.289, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1951", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.869, 0.312, 0.908] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1952", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 2.119, 0.318, 2.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1953", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.385, 0.373, 0.417] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_1954", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[textbox] Guest rooms -> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.465, 0.648, 0.495] in the image\nAnd my action is Action: TYPE\nValue: 20"}]}, {"id": "mind2web_1955", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Certified fresh movies -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.946, 0.435, 0.959] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1956", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK\n[li] Neighborhood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.517, 0.477, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1957", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.783, 0.956, 0.81] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1958", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Sun, Jun 4, 2023 -> CLICK\n[div] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.454, 0.959, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1959", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.585, 0.367, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1960", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1961", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.273, 0.688, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1962", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.429, 0.424, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1963", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.342, 0.093, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1964", "image": {"bytes": "", "path": "./images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music event organizers and follow the second one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.273, 0.273, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1965", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.024] in the image\nAnd my action is Action: TYPE\nValue: toilet paper"}]}, {"id": "mind2web_1966", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.555, 0.916, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1967", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.705, 0.223, 0.72] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1968", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.692, 0.67, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1969", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.0, 0.805, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1970", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[img] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.281, 0.316, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1971", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: Lubbock, Texas\n[button] Search -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.22, 0.871, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1972", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_fe5b6d26-4f32-4da6-b7c2-59a12433d959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.246, 0.577, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1973", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.096, 0.084, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1974", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK\n[group] \uf067 Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.352, 0.205, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1975", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.278, 0.95, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1976", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? 
-> CLICK\n[link] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.914, 0.353, 0.953, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1977", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.185, 0.991, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1978", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.374, 0.275, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1979", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.158, 0.65, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1980", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456789\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.192, 0.94, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1981", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.191, 0.594, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1982", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.18, 0.285, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1983", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.237, 0.246, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1984", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.219, 0.5, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1985", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.033, 0.788, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1986", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.127, 0.772, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1987", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.658, 0.787, 0.669] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_1988", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\n[link] Shop -> CLICK\n[link] Shop Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.159, 0.138, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1989", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_1990", "image": {"bytes": "", "path": "./images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Edit my movie watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.345, 0.094, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1991", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_1992", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.541, 0.716, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1993", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.241, 0.789, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1994", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 2.007, 0.339, 2.015] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1995", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_d1b37c24-1fd4-4076-981d-ce6ffecdaad5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian\n[b] Vegetarian -> CLICK\n[button] Fri., Apr. 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.326, 0.253, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1996", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.19, 0.782, 0.226] in the image\nAnd my action is Action: TYPE\nValue: red sox vs yankees"}]}, {"id": "mind2web_1997", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.346, 0.639, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1998", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[combobox] SORT BY -> SELECT: Price: Low to High\n[button] Select Mitsubishi Mirage Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.165, 0.951, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1999", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.324, 0.974, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2000", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 
123456780\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.031, 0.466, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2001", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.16, 0.703, 0.183] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_2002", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.139, 0.292, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2003", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_bc74f169-259e-446a-a63b-77e3f990d729.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.418, 0.182, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2004", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.455, 0.29, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2005", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK\n[combobox] How old are you? -> SELECT: 18-25\n[combobox] Who do you usually travel with? 
-> SELECT: Couple"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.309, 0.941, 0.334] in the image\nAnd my action is Action: SELECT\nValue: Yes"}]}, {"id": "mind2web_2006", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\n[link] Visit the Food & Drinks page -> CLICK\n[link] Explore Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.171, 0.172, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2007", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[div] 4 -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.543, 1.91, 0.815, 1.937] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2008", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! -> CLICK\n[link] Rides & Experiences \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.324, 0.654, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2009", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.456, 0.403, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2010", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.323, 0.708, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2011", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.109, 0.475, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2012", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.702, 0.097, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2013", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.182, 0.769, 0.218] in the image\nAnd my action is Action: TYPE\nValue: po box 2846"}]}, {"id": "mind2web_2014", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_1d37262b-2901-4468-bb2f-f5a9dd9e95b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK\n[button] Done -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.774, 0.573, 0.957, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2015", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK\n[link] Shop Vehicles Under $20,000 Link opens in a new wi... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.368, 0.225, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2016", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.458, 0.863, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2017", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK\n[checkbox] 4 stars rating -> CLICK\n[radio] Lowest price first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.859, 0.791, 0.896] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2018", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.008, 0.211, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2019", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.308, 0.3, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2020", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.376, 0.641, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2021", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.16, 0.331, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2022", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.179, 0.321, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2023", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.223, 0.359, 0.247] in the image\nAnd my action is Action: TYPE\nValue: empire state building"}]}, {"id": "mind2web_2024", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_6506aee3-cdd1-4f39-b9ff-4968228cfcda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.789, 0.96, 0.823] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2025", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK\n[button] Add to Wish List -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.925, 0.218, 0.947, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2026", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK\n[link] Last 90 days -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.141, 0.999, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2027", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.108, 0.905, 0.12] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_2028", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK\n[textbox] Drop-off location -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.249, 0.743, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2029", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.044, 0.384, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2030", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.256, 0.309, 0.277] in the image\nAnd my action is Action: TYPE\nValue: columbus"}]}, {"id": "mind2web_2031", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.056, 0.137, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2032", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.177, 0.755, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2033", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.355, 0.662, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2034", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.008, 0.211, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2035", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2036", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.373, 0.377, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2037", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.039, 0.931, 0.062] in the image\nAnd my action is Action: TYPE\nValue: Michael Jordan"}]}, {"id": "mind2web_2038", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.742, 0.106, 0.751] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2039", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? 
-> TYPE: SHANGHAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.493, 0.448, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2040", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.159, 0.816, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2041", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.2, 0.647, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2042", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.449, 0.045, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2043", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.277, 0.438, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2044", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.968, 0.746, 1.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2045", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.064, 0.441, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2046", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! -> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.31, 0.157, 0.359, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2047", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.018, 0.391, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2048", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.136, 0.875, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2049", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.312, 0.777, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2050", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK\n[span] Show -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 2.142, 0.158, 2.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2051", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.672, 0.494, 0.897] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2052", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.261, 0.359, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2053", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.0, 0.557, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2054", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.149, 0.647, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2055", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.232, 0.377, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2056", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.106, 0.421, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2057", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.088, 0.188, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2058", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.235, 0.211, 0.263] in the image\nAnd my action is Action: SELECT\nValue: 100 Miles"}]}, {"id": "mind2web_2059", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.414, 0.336, 0.444] in the image\nAnd my action is Action: TYPE\nValue: Venice Beach"}]}, {"id": "mind2web_2060", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.257, 0.751, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2061", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2062", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 1.417, 0.329, 1.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2063", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75\n[span] mm/dd/yyyy-mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.332, 0.312, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2064", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.844, 0.281, 0.93, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2065", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.907, 0.679, 0.914] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2066", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK\n[span] Add -> CLICK\n[link] View More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.222, 0.367, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2067", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.678, 0.458, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2068", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.043, 0.092, 0.173, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2069", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.081, 0.913, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2070", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.315, 0.508, 0.349] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_2071", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK\n[listbox] Select railcard 01 -> SELECT: Veterans Railcard\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.33, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2072", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: creed III\n[div] Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.725, 0.896, 0.75] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2073", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789\n[combobox] Expiration month \u00a0* -> TYPE: 01"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.645, 0.492, 0.66] in the image\nAnd my action is Action: SELECT\nValue: 2024"}]}, {"id": "mind2web_2074", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK\n[link] Auction -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.145, 0.792, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2075", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.45, 0.691, 0.464] in the image\nAnd my action is Action: SELECT\nValue: June 2023"}]}, {"id": "mind2web_2076", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.145, 0.463, 0.149] in the image\nAnd my action is Action: TYPE\nValue: cancun"}]}, {"id": "mind2web_2077", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 1.611, 0.141, 1.618] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2078", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.398, 0.173, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2079", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK\n[button] Select Nissan Kicks Vehicle -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.627, 0.951, 0.662] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2080", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.147, 0.657, 0.174] in the image\nAnd my action is Action: TYPE\nValue: Ohare, Chicago"}]}, {"id": "mind2web_2081", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.386, 0.349, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2082", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.311, 0.688, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2083", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.47, 0.691, 0.483] in the image\nAnd my action is Action: SELECT\nValue: June 2023"}]}, {"id": "mind2web_2084", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.244, 0.554, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2085", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.248, 0.73, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2086", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.008, 0.984, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2087", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? 
-> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.213, 0.643, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2088", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_853848fa-4225-4177-814e-f29de4ed4f5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.405, 0.855, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2089", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.276, 0.795, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2090", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.325, 0.478, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2091", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.221, 0.249, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2092", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.843, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2093", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.014, 0.68, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2094", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_8ed5187f-287c-40c5-9ca9-9a142fb87136.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA\n[input] -> TYPE: Walker"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.581, 0.347, 0.902, 0.387] in the image\nAnd my action is Action: TYPE\nValue: A987654"}]}, {"id": "mind2web_2095", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.655, 0.32, 0.696] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2096", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK\n[textbox] Select End Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.187, 0.277, 0.201, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2097", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.152, 0.36, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2098", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.772, 0.667, 0.782] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2099", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.037, 0.652, 0.066] in the image\nAnd my action is Action: TYPE\nValue: musical instruments"}]}, {"id": "mind2web_2100", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.261, 0.473, 0.286] in the image\nAnd my action is Action: TYPE\nValue: 5000"}]}, {"id": "mind2web_2101", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK\n[combobox] How many guests? 
-> SELECT: 4 Guests\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.407, 0.596, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2102", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.204, 0.176, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2103", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.173, 0.536, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2104", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... -> CLICK\n[button] 8:15 PM Table -> CLICK\n[button] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.293, 0.523, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2105", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2106", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.17, 0.851, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2107", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.02, 0.06, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2108", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.319, 0.926, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2109", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.034, 0.36, 0.046] in the image\nAnd my action is Action: SELECT\nValue: Airport"}]}, {"id": "mind2web_2110", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Flights -> CLICK\n[b] Columbus -> TYPE: NEW YORK\n[span] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.194, 0.617, 0.228] in the image\nAnd my action is Action: TYPE\nValue: TOKYO"}]}, {"id": "mind2web_2111", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.068, 0.387, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2112", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.544, 0.048, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2113", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.157, 0.181, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2114", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.124, 0.352, 0.147] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_2115", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER\n[link] Barcelona -> CLICK\n[link] Fixtures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.307, 0.138, 0.323] in the image\nAnd my action is Action: SELECT\nValue: Spanish Copa del Rey"}]}, {"id": "mind2web_2116", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[path] -> CLICK\n[div] 4 -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.173, 0.963, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2117", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.103, 0.388, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2118", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK\n[combobox] Find a store -> TYPE: 60540"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.12, 0.668, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2119", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2120", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 4.319, 0.611, 4.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2121", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.752, 0.495, 0.769] in the image\nAnd my action is Action: TYPE\nValue: 4533234565"}]}, {"id": "mind2web_2122", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9db12986-1e76-4bff-80d8-6fd5ec3fb7b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.095, 0.448, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2123", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.284, 0.27, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2124", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.174, 0.728, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2125", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.004, 0.675, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2126", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.583, 0.248, 0.68, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2127", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_4f7e3555-112c-40b1-b45e-a729bb210f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.177, 0.755, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2128", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_9bb8527b-4f0f-4adb-a232-baaaf881902d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK\n[combobox] Platform -> SELECT: PS5\n[tab] AUG '23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.284, 0.523, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2129", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.402, 0.355, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2130", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.261, 0.235, 0.277] in the image\nAnd my action is Action: SELECT\nValue: 2017"}]}, {"id": "mind2web_2131", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 1 weekly charts ranked artist\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.254, 0.206, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2132", "image": {"bytes": "", "path": "./images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending daily deals.\nPrevious actions:\n[link] Today's Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.455, 0.567, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2133", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.132, 0.777, 0.166] in the image\nAnd my action is Action: TYPE\nValue: Crew"}]}, {"id": "mind2web_2134", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.007, 0.553, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2135", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.212, 0.085, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2136", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.37, 0.575, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2137", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.288, 0.196, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2138", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK\n[span] Search all Parks -> CLICK\n[li] Acadia National Park -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.138, 0.727, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2139", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.297, 0.269, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2140", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.505, 0.105, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2141", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.367, 0.913, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2142", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.145, 0.609, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2143", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.354, 0.567, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2144", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_e2dc345b-7f76-4518-9f6b-1e75f62a4fa5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.49, 0.339, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2145", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.126, 0.041, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2146", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017\n[combobox] Select Maximum Year -> SELECT: 2017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 1.171, 0.226, 1.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2147", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.263, 0.963, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2148", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Employer Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.551, 0.685, 0.578] in the image\nAnd my action is Action: TYPE\nValue: Gua AB"}]}, {"id": "mind2web_2149", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.37, 0.894, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2150", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9bb86454-f3c6-453e-b06e-70f28ec3d09a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: Gingerbread cakes"}]}, {"id": "mind2web_2151", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.264, 0.784, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2152", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.218, 0.322, 0.24] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_2153", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.375, 0.135, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2154", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.425, 0.686, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2155", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.364, 0.486, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2156", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.084, 0.957, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2157", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\n[heading] Flight status -> CLICK\n[textbox] Flight number -> TYPE: 1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.427, 0.478, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2158", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.262, 0.592, 0.288] in the image\nAnd my action is Action: TYPE\nValue: Shubert Theatre"}]}, {"id": "mind2web_2159", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_7f571a32-da4f-48e1-b26c-5c5b412dca5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK\n[link] GLAMPING \uf0da -> CLICK\n[link] glamping near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.314, 0.141, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2160", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.214, 0.966, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2161", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.059, 0.589, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2162", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: TYPE\nValue: PARIS"}]}, {"id": "mind2web_2163", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.371, 0.196, 0.393] in the image\nAnd my action is Action: SELECT\nValue: Hardback"}]}, {"id": "mind2web_2164", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.317, 0.711, 0.346] in the image\nAnd my action is Action: TYPE\nValue: Mark"}]}, {"id": "mind2web_2165", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\n[link] Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.335, 0.189, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2166", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_a695dfc5-ea9d-4bb7-8efb-45d3aa1f8928.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK\n[spinbutton] Gift Card Number -> TYPE: 1000000000000000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.276, 0.5, 0.304] in the image\nAnd my action is Action: TYPE\nValue: 1222"}]}, {"id": "mind2web_2167", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... 
-> CLICK\n[button] $100 -> CLICK\n[textbox] To -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.111, 0.968, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2168", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK\n[button] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.092, 0.517, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2169", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_d47cbb1c-7d70-445f-a145-f4af8c2e35f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Demo Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.452, 0.667, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2170", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK\n[tab] Features -> CLICK\n[checkbox] Wi-Fi Capability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.449, 0.757, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2171", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.535, 0.497, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2172", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.411, 0.469, 0.429] in the image\nAnd my action is Action: TYPE\nValue: Ohio"}]}, {"id": "mind2web_2173", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK\n[generic] 600 W -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.381, 0.451, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2174", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.303, 0.754, 0.342] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_2175", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.119, 0.777, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2176", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.081, 0.303, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2177", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.712, 0.399, 0.868, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2178", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK\n[select] English FA Community Shield -> SELECT: UEFA Champions League"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.314, 0.26, 0.334] in the image\nAnd my action is Action: SELECT\nValue: 2022-23"}]}, {"id": "mind2web_2179", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.339, 0.497, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2180", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Bus Fares -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.406, 0.576, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2181", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.558, 0.429, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2182", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.356, 0.711, 0.385] in the image\nAnd my action is Action: TYPE\nValue: Johnmark@gmail.com"}]}, {"id": "mind2web_2183", "image": {"bytes": "", "path": "./images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare Pro Plans with other plans available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.0, 0.702, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2184", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK\n[button] Follow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.222, 0.375, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2185", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.532, 0.089, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2186", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.266, 0.486, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2187", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.003, 0.204, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2188", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.119, 0.266, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2189", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.718, 0.391, 0.729] in the image\nAnd my action is Action: TYPE\nValue: RA@gmail.com"}]}, {"id": "mind2web_2190", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.276, 0.573, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2191", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/21/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.5, 0.204, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2192", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 1.831, 0.087, 1.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2193", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.404, 0.184, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2194", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.059, 0.129, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2195", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.073, 0.402, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2196", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_ad1a48fb-4b8a-4ea8-8945-04d8b57dd201.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107\n[div] 49107 - Buchanan, MI -> CLICK\n[button] Request Appointment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.4, 0.333, 0.438, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2197", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.145, 0.206, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2198", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.068, 0.387, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2199", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK\n[checkbox] $5.99/Day$1.38/Day -> CLICK\n[checkbox] MARKET -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.295, 0.93, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2200", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.474, 0.568, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2201", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.494, 0.181, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2202", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.153, 0.846, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2203", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_0d3bb8c1-0174-43bb-ba64-b4f5d4392c7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[button] Find Schedules -> CLICK\n[img] -> CLICK\n[span] -> CLICK\n[button] Close -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.393, 0.296, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2204", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK\n[link] One-Pieces -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.489, 0.256, 0.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2205", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.1, 0.487, 0.135] in the image\nAnd my action is Action: TYPE\nValue: Neo"}]}, {"id": "mind2web_2206", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.092, 0.559, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2207", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.298, 0.163, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2208", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.079, 0.106, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2209", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.159, 0.4, 0.2] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_2210", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.092, 0.181, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2211", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.511, 0.512, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2212", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.294, 0.894, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2213", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK\n[link] Gaming Mice -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.09, 0.979, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2214", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith\n[textbox] Email Address * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.615, 0.557, 0.637] in the image\nAnd my action is Action: TYPE\nValue: 6157075521"}]}, {"id": "mind2web_2215", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.377, 0.943, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2216", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.21, 0.395, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2217", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.358, 0.868, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2218", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.292, 0.746, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2219", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 
1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.376, 0.656, 0.419] in the image\nAnd my action is Action: TYPE\nValue: Davis"}]}, {"id": "mind2web_2220", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_f7c781d4-9856-4f3e-b227-20e1cfe0a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM\n[combobox] End Time -> SELECT: 5:00 PM\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.496, 0.372, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2221", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\n[link] Fantasy . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.118, 0.725, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2222", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[link] Restaurants -> HOVER\n[span] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.027, 0.564, 0.044] in the image\nAnd my action is Action: TYPE\nValue: SAN FRANCISCO"}]}, {"id": "mind2web_2223", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.134, 0.824, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2224", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK\n[heading] Apple iPhone 12 Pro - 128GB - All Colors - Unlocke... 
-> CLICK\n[combobox] Please select a Color -> SELECT: Blue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.273, 0.737, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2225", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.402, 0.606, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2226", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.159, 0.694, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2227", "image": {"bytes": "", "path": "./images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find fitness events for this weekend.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.323, 0.252, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2228", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.011, 0.87, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2229", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.075, 0.305, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2230", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.203, 0.327, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2231", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: SPRINGFIELD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.3, 0.359, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2232", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.028, 0.128, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2233", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com\n[combobox] Organization Type -> SELECT: Family Trip\n[input] -> TYPE: Johnson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.305, 0.777, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2234", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.711, 0.787, 0.722] in the image\nAnd my action is Action: SELECT\nValue: Chicago"}]}, {"id": "mind2web_2235", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER\n[link] Barcelona -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.104, 0.093, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2236", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.605, 0.367, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2237", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.038, 0.765, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2238", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK\n[div] 23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.001, 1.375, 0.273, 1.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2239", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.584, 0.081, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2240", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 
1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: Davis"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.575, 0.446, 0.683, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2241", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.324, 0.5, 0.362] in the image\nAnd my action is Action: TYPE\nValue: Accra"}]}, {"id": "mind2web_2242", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] showtime -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.404, 0.355, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2243", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.35, 0.904, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2244", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.197, 0.517, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2245", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.236, 0.919, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2246", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.041, 0.25, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2247", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.571, 0.233, 0.614] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2248", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.405, 0.194, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2249", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\n[button] Shopping -> CLICK\n[link] Trades -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.167, 0.598, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2250", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2251", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.367, 0.468, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2252", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.325, 0.048, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2253", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK\n[img] movie poster for Elvis -> CLICK\n[button] Rent from $3.99 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.21, 0.174, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2254", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.308, 0.249, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2255", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.117, 0.263, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2256", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.279, 0.479, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2257", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.285, 0.783, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2258", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_f1d36d0f-7896-44f3-bf48-6f9f9950416c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.529, 0.339, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2259", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.308, 0.325, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2260", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.09, 0.322, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2261", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Last of Us"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.043, 0.194, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2262", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK\n[searchbox] Please type your destination -> TYPE: udupi\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.419, 0.805, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2263", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.179, 0.271, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2264", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.175, 0.627, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2265", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Accra"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.396, 0.72, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2266", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.005, 0.867, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2267", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.483, 0.512, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2268", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.843, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2269", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.294, 0.463, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2270", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.394, 0.158, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2271", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER\n[link] Most Played -> CLICK\n[generic] By Current Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.047, 0.543, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2272", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.038, 0.999, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2273", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_43f569ad-2a06-4a74-b77c-16acd0431fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.414, 0.922, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2274", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_b4ba7775-0761-4026-ace8-47325c692364.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.316, 0.161, 0.345] in the image\nAnd my action is Action: SELECT\nValue: Daytime Only Parking"}]}, {"id": "mind2web_2275", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.365, 0.695, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2276", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_24676627-e890-4aab-a9a2-aa595ee4e950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.214, 0.253, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2277", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK\n[link] Self-Rising Crust Uncured Pepperoni Frozen Pizza -... -> CLICK\n[svg] -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.524, 0.988, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2278", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK\n[textbox] Name of Recipient -> TYPE: April May\n[textbox] Email Address of Recipient -> TYPE: april.may@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.592, 0.938, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2279", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.157, 0.753, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2280", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.435, 0.504, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2281", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks\n[p] Tom Hanks -> CLICK\n[link] My Life in Ruins (2009) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.749, 0.518, 0.753] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2282", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.296, 0.857, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2283", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.045, 0.785, 0.057] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_2284", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.407, 0.1, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2285", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.478, 0.343, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2286", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_10e90e61-628c-4bc0-ab57-827dd4085228.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.103, 0.977, 0.142] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_2287", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.276, 0.552, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2288", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.18, 0.692, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2289", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.306, 0.153, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2290", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.478, 0.336, 0.504] in the image\nAnd my action is Action: TYPE\nValue: new delhi"}]}, {"id": "mind2web_2291", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Certified fresh movies -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.491, 0.435, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2292", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK\n[button] Departments -> CLICK\n[link] Wine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.275, 0.457, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2293", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam\n[generic] Vehicle Type * -> CLICK\n[p] Small to Full Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.267, 0.56, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2294", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.124, 0.352, 0.147] in the image\nAnd my action is Action: TYPE\nValue: 07718"}]}, {"id": "mind2web_2295", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2296", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.233, 0.249, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2297", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.387, 0.617, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2298", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\n[link] Visit the See A Movie page -> CLICK\n[link] Coming Soon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.145, 0.285, 0.172] in the image\nAnd my action is Action: SELECT\nValue: AMC Artisan Films"}]}, {"id": "mind2web_2299", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lave Hot Springs East KOA\n[list] KOA Logo Icon Lava Hot Springs West KOA Holiday La... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.121, 0.771, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2300", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.361, 0.332, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2301", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.139, 0.343, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2302", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK\n[link] Shop Vehicles Under $20,000 Link opens in a new wi... -> CLICK\n[button] Body Type \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.371, 0.196, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2303", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.225, 0.488, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2304", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.113, 0.78, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2305", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.623, 0.114, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2306", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.417, 0.758, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2307", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_bdb13abb-d7a0-428b-94c5-64951f68f1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.386, 0.216, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2308", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.113, 0.965, 0.135] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_2309", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.368, 0.218, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2310", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.39, 0.512, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2311", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 1.286, 0.223, 1.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2312", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.159, 0.38, 0.198] in the image\nAnd my action is Action: TYPE\nValue: new orleans"}]}, {"id": "mind2web_2313", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.102, 0.594, 0.114] in the image\nAnd my action is Action: TYPE\nValue: Avatar The Way of Water"}]}, {"id": "mind2web_2314", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.132, 0.215, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2315", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100\n[textbox] Enter your pick-up location or zip code -> ENTER\n[link] Close -> CLICK\n[div] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.117, 0.493, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2316", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.028, 0.535, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2317", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.504, 0.373, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2318", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.018, 0.335, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2319", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.031, 0.169, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2320", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.269, 0.102, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2321", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.159, 0.441, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2322", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.051, 0.483, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2323", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.674, 0.243, 0.709, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2324", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[button] All cities -> CLICK\n[button] Go! -> CLICK\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.282, 0.419, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2325", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.122, 0.855, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2326", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.998, 0.322, 1.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2327", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.886, 0.081, 0.893] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2328", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\n[textbox] Where to? -> TYPE: cancun\n[circle] -> CLICK\n[span] Trip Inspiration -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.072, 0.34, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2329", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... -> CLICK\n[combobox] 2 tickets for HAPPY HOUR BURGER -> SELECT: 1 Ticket"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.31, 0.153, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2330", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.63, 0.412, 0.643] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2331", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_28771408-7ab1-41bd-9819-91f5781f65d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.313, 0.911, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2332", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.161, 0.255, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2333", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.177, 0.812, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2334", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.588, 0.32, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2335", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.074, 0.97, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2336", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.148, 0.31, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2337", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas\n[button] Complete -> CLICK\n[button] Continue Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.409, 0.781, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2338", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.14, 0.212, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2339", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)\n[select] All -> SELECT: Spanish (42)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.362, 0.196, 0.383] in the image\nAnd my action is Action: SELECT\nValue: Paperback (39,356)"}]}, {"id": "mind2web_2340", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.917, 0.249, 0.931] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2341", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.308, 0.582, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2342", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.179, 0.321, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2343", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500\n[link] Show 684 stays -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.406, 0.7, 0.422] in the image\nAnd my action is Action: TYPE\nValue: Togo"}]}, {"id": "mind2web_2344", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple\n[span] Kashi Vishwanath Temple, Varanasi, Uttar Pradesh, ... 
-> CLICK\n[path] -> CLICK\n[div] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.272, 0.721, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2345", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee\n[textbox] Recipient Email -> TYPE: scisoorbros@gmail.com\n[textbox] Sender Name -> TYPE: Jeerimiah Waton"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.573, 0.975, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2346", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.25, 0.442, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2347", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2348", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.317, 0.506, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2349", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_ffc9b6af-f030-473b-8333-db8f3f2cf31f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.241, 0.581, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2350", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[heading] Continue -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.598, 0.642, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2351", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK\n[link] Under $75.00 -> CLICK\n[button] Delivery Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.351, 0.44, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2352", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.214, 0.359, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2353", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.367, 0.605, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2354", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Stoves, Grills & Fuel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.941, 0.149, 0.951] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2355", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.486, 0.529, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2356", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.24, 0.278, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2357", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.081, 0.367, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2358", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.003, 0.31, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2359", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5155bd89-c99d-4e8e-8cbe-185618e319e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK\n[span] Select a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.206, 0.969, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2360", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.069, 0.788, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2361", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.0, 0.445, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2362", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.166, 0.287, 0.19] in the image\nAnd my action is Action: TYPE\nValue: 43215"}]}, {"id": "mind2web_2363", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.201, 0.704, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2364", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.139, 0.332, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2365", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 1.985, 0.226, 1.995] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2366", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 1.5, 0.296, 1.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2367", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.49, 0.984, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2368", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.248, 0.338, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2369", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.571, 0.566, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2370", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.07, 0.259, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2371", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.272, 0.525, 0.305] in the image\nAnd my action is Action: SELECT\nValue: Family Trip"}]}, {"id": "mind2web_2372", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.354, 0.28, 0.392] in the image\nAnd my action is Action: TYPE\nValue: addis ababa"}]}, {"id": "mind2web_2373", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.665, 0.071, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2374", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.036, 0.057, 0.087, 0.062] in the image\nAnd my action is Action: TYPE\nValue: 49107"}]}, {"id": "mind2web_2375", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.196, 0.226, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2376", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor\n[span] easter home decor -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.332, 0.974, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2377", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.304, 0.505, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2378", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: Timesqure New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.286, 0.409, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2379", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.343, 0.546, 0.37] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_2380", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.417, 0.645, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2381", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.092, 0.129, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2382", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.181, 0.45, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2383", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.679, 0.495, 0.697] in the image\nAnd my action is Action: TYPE\nValue: john"}]}, {"id": "mind2web_2384", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... 
-> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.679, 0.2, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2385", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK\n[link] Recommendations -> HOVER\n[menuitem] Winter Preview: New Shows Worth Watching -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.974, 0.555, 0.981] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2386", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.112, 0.369, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2387", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_96779cdb-a229-44fd-848a-8522a105e38d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.185, 0.702, 0.22] in the image\nAnd my action is Action: TYPE\nValue: 2983 Marietta Street"}]}, {"id": "mind2web_2388", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK\n[gridcell] Tue Apr 18 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.308, 0.187, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2389", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.046, 0.669, 0.079] in the image\nAnd my action is Action: TYPE\nValue: 60540"}]}, {"id": "mind2web_2390", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[div] Need an extra part? -> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307\n[button] 105307 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.403, 0.617, 0.475] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_2391", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_a8277574-07e0-411e-8787-81cb20501c1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.216, 0.721, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2392", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.216, 0.937, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2393", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\n[combobox] Search query -> TYPE: las vegas raiders\n[img] Las Vegas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.745, 0.976, 0.832] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2394", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.403, 0.319, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2395", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Los Angeles\n[b] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.304, 0.777, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2396", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK\n[link] Full Team Statistics -> CLICK\n[heading] David Pastrnak RW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.133, 0.249, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2397", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.348, 0.124, 0.386] in the image\nAnd my action is Action: TYPE\nValue: 70726"}]}, {"id": "mind2web_2398", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.391, 0.913, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2399", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.174, 0.418, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2400", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_395e5287-15fc-412e-8c91-356376438cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.561, 0.224, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2401", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.041, 0.281, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2402", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.009, 0.418, 0.029] in the image\nAnd my action is Action: TYPE\nValue: headphones"}]}, {"id": "mind2web_2403", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.718, 0.557, 0.746] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_2404", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.196, 0.438, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2405", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor\n[button] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.346, 0.18, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2406", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Pre-paid Wi-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.165, 0.943, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2407", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.52, 0.504, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2408", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.005, 0.838, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2409", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.265, 0.362, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2410", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Sort -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.392, 0.13, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2411", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b66d3924-665c-4d00-89ce-3af5bbdefa0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK\n[input] -> TYPE: 200\n[span] Prices with Fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.272, 0.978, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2412", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas\n[span] Las Vegas, Nevada, United States -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.57, 0.081, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2413", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.424, 0.314, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2414", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.036, 0.652, 0.064] in the image\nAnd my action is Action: TYPE\nValue: xbox series x console"}]}, {"id": "mind2web_2415", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK\n[p] Video Games -> CLICK\n[searchbox] Find values for games and more -> TYPE: Black Ops"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.472, 0.975, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2416", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.121, 0.434, 0.139] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_2417", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 2.876, 0.477, 2.904] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2418", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.769, 0.678, 0.787] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_2419", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.46, 0.917, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2420", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.288, 0.949, 0.32] in the image\nAnd my action is Action: TYPE\nValue: 1111111111111111"}]}, {"id": "mind2web_2421", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.127, 0.825, 0.155] in the image\nAnd my action is Action: SELECT\nValue: Total Price"}]}, {"id": "mind2web_2422", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.362, 0.691, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2423", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_247b7cf9-68ad-4c76-b6f9-179be7ce2b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK\n[link] RETIRED -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.29, 0.491, 0.315] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_2424", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Alagnak\n[option] Alagnak Wild River -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.289, 0.73, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2425", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.263, 0.331, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2426", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.44, 0.798, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2427", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK\n[textbox] Search IMDb -> TYPE: The Magician's Elephant"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.024, 0.657, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2428", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.243, 0.205, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2429", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK\n[label] Add -> CLICK\n[combobox] Select a list -> SELECT: Wish List"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.406, 0.781, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2430", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.039] in the image\nAnd my action is Action: TYPE\nValue: laptop"}]}, {"id": "mind2web_2431", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.676, 0.263, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2432", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.412, 0.255, 0.439] in the image\nAnd my action is Action: SELECT\nValue: 2010"}]}, {"id": "mind2web_2433", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.006, 0.348, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2434", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.146, 0.199, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2435", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.217, 0.438, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2436", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.063, 0.851, 0.093] in the image\nAnd my action is Action: TYPE\nValue: Mustang"}]}, {"id": "mind2web_2437", "image": {"bytes": "", "path": "./images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the release date and supported platforms for the game \"Elden Ring.\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.108, 0.047, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2438", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.071, 0.664, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2439", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.021, 0.054, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2440", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.431, 0.305, 0.463] in the image\nAnd my action is Action: SELECT\nValue: PM"}]}, {"id": "mind2web_2441", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.445, 0.812, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2442", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.337, 0.121, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2443", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888\n[combobox] Market: * -> SELECT: Chicago\n[textbox] Spot Address: * -> TYPE: 123rd st"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.793, 0.264, 0.804] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2444", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.18, 0.359, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2445", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... 
-> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK\n[button] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.147, 0.573, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2446", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK\n[label] Limited Stock -> CLICK\n[label] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.492, 0.09, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2447", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.146, 0.2, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2448", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.877, 0.197, 0.918, 0.216] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_2449", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_f2a963c3-0f8b-4bad-b7ab-4298c4bf0d48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.196, 0.124, 0.217] in the image\nAnd my action is Action: TYPE\nValue: 49102"}]}, {"id": "mind2web_2450", "image": {"bytes": "", "path": "./images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the current standings for the western conference in the NBA and find the top team.\nPrevious actions:\n[link] NBA . -> HOVER\n[link] Standings . 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.914, 0.109, 0.937] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2451", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.384, 0.574, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2452", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.592, 0.289, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2453", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK\n[option] Last Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.232, 0.616, 0.245] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_2454", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.283, 0.249, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2455", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK\n[span] Price: low to high -> CLICK\n[button] Show filter modal size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.249, 0.382, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2456", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.25, 0.359, 0.277] in the image\nAnd my action is Action: TYPE\nValue: brooklyn"}]}, {"id": "mind2web_2457", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.243, 0.091, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2458", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.326, 0.492, 0.348] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_2459", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.34, 0.802, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2460", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.147, 0.385, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2461", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing\n[option] vintage clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.031, 0.906, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2462", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.316, 0.215, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2463", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK\n[span] Default -> CLICK\n[li] .15 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.801, 0.39, 0.835] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2464", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.274, 0.583, 0.321] in the image\nAnd my action is Action: SELECT\nValue: LA"}]}, {"id": "mind2web_2465", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing\n[textbox] Location -> TYPE: 90028\n[combobox] Distance -> SELECT: Within 50 miles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.717, 0.291, 0.779, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2466", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.691, 0.546, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2467", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_c2e21f02-9b52-4af0-a516-8ded4f5667d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.361, 0.666, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2468", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[button] Go! -> CLICK\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.167, 0.664, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2469", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_57cbe038-dfd5-40a1-a06b-ac5867355b3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... 
-> CLICK\n[link] MONTHLY -> CLICK\n[button] See options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.11, 0.622, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2470", "image": {"bytes": "", "path": "./images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of countries with the highest number of seen aircrafts.\nPrevious actions:\n[link] ADS-B -> HOVER\n[link] Statistics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.241, 0.437, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2471", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK\n[textbox] CHECK OUT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.212, 0.594, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2472", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_9de9bac3-ef0b-4042-b991-1b1dfc157d6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[select] Sort by Distance -> SELECT: Sort by Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.474, 0.32, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2473", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 3.629, 0.872, 3.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2474", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.043, 0.117, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2475", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.65, 0.394, 0.67] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2476", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_0cd2cdd2-052b-42c7-9e1d-f0bc04e54244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.177, 0.86, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2477", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK\n[span] Default -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.772, 0.493, 0.794] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2478", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.14, 0.562, 0.146] in the image\nAnd my action is Action: SELECT\nValue: 2 Guests"}]}, {"id": "mind2web_2479", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 0.114, 0.515, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2480", "image": {"bytes": "", "path": "./images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out the cancellation policy\nPrevious actions:\n[link] Help -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.487, 0.945, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2481", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.167, 0.421, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2482", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\n[tab] Alerts -> CLICK\n[link] red line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.31, 0.295, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2483", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... -> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.293, 0.968, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2484", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.557, 0.069, 1.576] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_2485", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.125, 0.957, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2486", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.185, 0.123, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2487", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 1.17, 0.47, 1.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2488", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.128, 0.5, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2489", "image": {"bytes": "", "path": "./images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter search results for guitar tabs to only show songs with a difficulty rating of \"Beginner\"\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.2, 0.153, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2490", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\n[combobox] Search query -> TYPE: phoenix suns"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.102, 0.727, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2491", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_fd9fe93d-6937-4027-89b5-20b0221d4c27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.242, 0.595, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2492", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.195, 0.928, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2493", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.31, 0.589, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2494", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.527, 0.237, 0.557] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2495", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream\n[p] Neve Campbell, Courteney Cox, David Arquette -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 1.339, 0.695, 1.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2496", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.746, 0.532, 0.755] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_2497", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.086, 0.332, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2498", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK\n[link] Add to wishlist -> CLICK\n[textbox] Wishlist name -> TYPE: Must buy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.11, 0.716, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2499", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK\n[link] View store directory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.753, 0.31, 0.785, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2500", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.204, 0.754, 0.219] in the image\nAnd my action is Action: TYPE\nValue: Thalia Hall"}]}, {"id": "mind2web_2501", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.092, 0.261, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2502", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.296, 0.699, 0.323] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_2503", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.404, 0.393, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2504", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23\n[button] Search -> CLICK\n[img] NBA 2K23 - PlayStation 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.61, 0.975, 0.637] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2505", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK\n[tab] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.152, 0.666, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2506", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK\n[radio] Minivans -> CLICK\n[button] Apply Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.462, 0.837, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2507", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.287, 0.788, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2508", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living History event to attend in in April .\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.362, 0.93, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2509", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.351, 0.517, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2510", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.266, 0.221, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2511", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.162, 0.846, 0.21] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_2512", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.291, 0.561, 0.322] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_2513", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.09, 0.628, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2514", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.486, 0.99, 0.582] in the image\nAnd my action is Action: TYPE\nValue: Congrats on your new home."}]}, {"id": "mind2web_2515", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.005, 0.52, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2516", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.15, 0.094, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2517", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 24 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.332, 0.94, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2518", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.092, 0.181, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2519", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.543, 0.29, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2520", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK\n[generic] 600 W -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.241, 0.4, 0.258] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_2521", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.306, 0.341, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2522", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_524b1cc6-240f-41df-a42b-9de89456c807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Protections & Coverages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.49, 0.341, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2523", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.0, 0.354, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2524", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_c3f1eed3-fedd-4937-8837-44bcace14f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... -> CLICK\n[link] MONTHLY -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.298, 0.328, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2525", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.553, 0.122, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2526", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.685, 0.319, 0.707] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2527", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.656, 0.266, 0.673] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2528", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.316, 0.5, 0.348] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_2529", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK\n[link] 1990s 3,183 -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.426, 0.97, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2530", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.243, 0.155, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2531", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.392, 0.079, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2532", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees\n[option] New York Yankees -> CLICK\n[link] TICKETS -> CLICK\n[div] More Options -> CLICK\n[span] Parking Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.363, 0.978, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2533", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.317, 0.282, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2534", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.199, 0.861, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2535", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.323, 0.237, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2536", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_d02d7a6c-c5d6-4b61-bd22-20a8e2309ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK\n[checkbox] Wine tasting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.91, 0.713, 0.954] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2537", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[button] All -> CLICK\n[radio] Key extraction -> CLICK\n[radio] Vehicle -> CLICK\n[generic] 2 filters Key extraction \u2022 Vehicle Clear all Cance... -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.152, 0.642, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2538", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.119, 0.414, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2539", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.128, 0.163, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2540", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.982, 0.846, 1.004] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2541", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 2.492, 0.276, 2.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2542", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.407, 0.5, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2543", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.341, 0.861, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2544", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.43, 0.95, 0.464] in the image\nAnd my action is Action: SELECT\nValue: 2023"}]}, {"id": "mind2web_2545", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.0, 0.756, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2546", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.297, 0.157, 0.324] in the image\nAnd my action is Action: TYPE\nValue: 59316"}]}, {"id": "mind2web_2547", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK\n[span] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.168, 0.966, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2548", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.088, 0.517, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2549", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.12, 0.429, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2550", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.311, 0.309, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2551", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Gaming Monitors Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.491, 0.868, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2552", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.031, 0.272, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2553", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.116, 0.109, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2554", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.461, 0.194, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2555", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.245, 0.705, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2556", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.604, 0.32, 0.616] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2557", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: MUMBAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.318, 0.359, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2558", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Volunteer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.266, 0.398, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2559", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.02, 0.953, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2560", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.355, 0.514, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2561", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.348, 0.654, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2562", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.512, 0.194, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2563", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.061, 0.319, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2564", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK\n[i] -> CLICK\n[checkbox] Buick\ufeff1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.201, 0.588, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2565", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.42, 0.059, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2566", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_98480951-572a-451f-8538-188191a9a0c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.353, 0.116, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2567", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.151, 0.703, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2568", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_8f468438-c6e4-4af0-be8e-055e175d6de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.7, 0.541, 0.737] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_2569", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.286, 0.41, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2570", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.293, 0.763, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2571", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK\n[link] Navigate to 1-Night Kiosk Rentals See More -> CLICK\n[img] Blockers, MOVIE on , , Comedy, Romance, Special In... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.387, 0.338, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2572", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.279, 0.373, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2573", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.185, 0.237, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2574", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK\n[button] No thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 1.065, 0.953, 1.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2575", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.21, 0.5, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2576", "image": {"bytes": "", "path": "./images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of publishers for board games\nPrevious actions:\n[button] Browse -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.07, 0.218, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2577", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.377, 0.469, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2578", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[li] Spring -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.008, 0.82, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2579", "image": {"bytes": "", "path": "./images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me news about the ps5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.423, 0.044, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2580", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] Caribbean -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[button] Increase to 4 guest button -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.113, 0.233, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2581", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Chicago Bulls"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.079, 0.406, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2582", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... 
-> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.311, 0.595, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2583", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.014, 0.855, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2584", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.287, 0.606, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2585", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.011, 0.05, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2586", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.449, 0.759, 0.478] in the image\nAnd my action is Action: TYPE\nValue: 08/04/23"}]}, {"id": "mind2web_2587", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK\n[button] All dates -> CLICK\n[link] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.541, 0.941, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2588", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[span] Save -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.206, 0.612, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2589", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.034, 0.358, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2590", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK\n[link] Guitar Pro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.222, 0.97, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2591", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... -> CLICK\n[textbox] Email -> TYPE: lin.lon@gmail.com\n[textbox] Confirm email -> TYPE: lin.lon@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.359, 0.83, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2592", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.361, 0.829, 0.372] in the image\nAnd my action is Action: TYPE\nValue: HOLLYWOOD"}]}, {"id": "mind2web_2593", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_50d4a729-d12a-4707-af8d-69b5ab13c8db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.491, 0.219, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2594", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.333, 0.831, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2595", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.541, 0.227, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2596", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK\n[button] Jul 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.444, 0.984, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2597", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\n[button] SPORTS -> HOVER\n[tab] NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.113, 0.455, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2598", "image": {"bytes": "", "path": "./images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for the newsletter\nPrevious actions:\n[textbox] Email Address * -> TYPE: larryknox@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 1.315, 0.337, 1.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2599", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.048, 0.187, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2600", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.36, 0.736, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2601", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.713, 0.53, 0.721] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2602", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.075, 0.719, 0.103] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_2603", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.798, 0.678, 0.817] in the image\nAnd my action is Action: TYPE\nValue: smith@gmail.com"}]}, {"id": "mind2web_2604", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.236, 0.314, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2605", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.213, 0.324, 0.232] in the image\nAnd my action is Action: SELECT\nValue: BMW"}]}, {"id": "mind2web_2606", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK\n[searchbox] Please type your destination -> TYPE: udupi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.346, 0.409, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2607", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK\n[img] Submit Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.699, 0.205, 0.709] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2608", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.545, 0.36, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2609", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.658, 0.831, 0.663] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2610", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.336, 0.759, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2611", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.456, 0.236, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2612", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Library -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.247, 0.285, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2613", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK\n[div] White Water Rafting -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.271, 0.905, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2614", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.256, 0.573, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2615", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.391, 0.468, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2616", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.26, 0.459, 0.286] in the image\nAnd my action is Action: TYPE\nValue: 5000"}]}, {"id": "mind2web_2617", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.0, 0.505, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2618", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.028, 0.93, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2619", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.462, 0.305, 0.496] in the image\nAnd my action is Action: SELECT\nValue: PM"}]}, {"id": "mind2web_2620", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.303, 0.243, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2621", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.019, 0.443, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2622", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK\n[link] Pets welcome (118) -> CLICK\n[link] Free breakfast (57) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.116, 0.556, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2623", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.265, 0.226, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2624", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as helpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2625", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.032, 0.938, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2626", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 2.436, 0.437, 2.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2627", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.366, 0.761, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2628", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.437, 0.249, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2629", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.547, 0.873, 0.565] in the image\nAnd my action is Action: TYPE\nValue: Happy Christmas"}]}, {"id": "mind2web_2630", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK\n[link] F1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.42, 0.195, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2631", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.493, 0.286, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2632", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.145, 0.312, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2633", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[link] Store -> CLICK\n[link] Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.3, 0.219, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2634", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.08, 0.388, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2635", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.142, 0.947, 0.184] in the image\nAnd my action is Action: SELECT\nValue: Carnival"}]}, {"id": "mind2web_2636", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.44, 0.772, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2637", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.224, 0.406, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2638", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.819, 0.2, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2639", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.204, 0.079, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2640", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK\n[span] Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.447, 0.495, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2641", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.35, 0.988, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2642", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.009, 0.898, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2643", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.469, 0.471, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2644", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.291, 0.657, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2645", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_0a0d2fc8-04cf-49c2-a658-cf12ef65c5eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian\n[b] Vegetarian -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.22, 0.075, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2646", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow\n[span] Glasgow Central -> CLICK\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK\n[link] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 15"}]}, {"id": "mind2web_2647", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.023, 0.553, 0.041] in the image\nAnd my action is Action: TYPE\nValue: mens black hoodie"}]}, {"id": "mind2web_2648", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.096, 0.23, 0.182, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2649", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK\n[div] Popular -> CLICK\n[div] A - Z -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.371, 0.263, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2650", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.231, 0.168, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2651", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Same as pick-up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.198, 0.931, 0.249] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_2652", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.405, 0.327, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2653", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.279, 0.259, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Santa Fe"}]}, {"id": "mind2web_2654", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: TYPE\nValue: male"}]}, {"id": "mind2web_2655", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.213, 0.289, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2656", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.745, 0.192, 0.759] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2657", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.041, 0.519, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2658", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.362, 0.553, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2659", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.244, 0.196, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2660", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.947, 0.081, 0.954] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2661", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.238, 0.159, 0.257] in the image\nAnd my action is Action: SELECT\nValue: 1 Ticket"}]}, {"id": "mind2web_2662", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.59, 0.508, 0.634] in the image\nAnd my action is Action: SELECT\nValue: Civic"}]}, {"id": "mind2web_2663", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK\n[img] Men's UA Tech\u2122 2.0 Short Sleeve -> CLICK\n[button] XL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.437, 0.952, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2664", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.075, 0.719, 0.103] in the image\nAnd my action is Action: TYPE\nValue: NAPA VALLEY"}]}, {"id": "mind2web_2665", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.124, 0.855, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2666", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.43, 0.614, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2667", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Movies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.087, 0.535, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2668", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.634, 0.259, 0.652] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2669", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.35, 0.424, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2670", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.063, 0.489, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2671", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 1.07, 0.231, 1.082] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_2672", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.073, 0.249, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2673", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.152, 0.74, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2674", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK\n[button] Age -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.316, 0.988, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2675", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_01b5c5b4-5304-4c38-9e8c-36cf97ccfa74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.312, 0.694, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2676", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.843, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2677", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. 
Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2678", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.317, 0.346, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2679", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.453, 0.504, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2680", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[div] & up -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.274, 0.451, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2681", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.159, 0.927, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2682", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.117, 0.326, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2683", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! -> CLICK\n[div] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 0.09, 0.689, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2684", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.169, 0.294, 0.196] in the image\nAnd my action is Action: TYPE\nValue: 59901"}]}, {"id": "mind2web_2685", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.079, 0.321, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2686", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.046, 0.728, 0.084] in the image\nAnd my action is Action: TYPE\nValue: lebron james"}]}, {"id": "mind2web_2687", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.271, 0.291, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2688", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.442, 0.271, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2689", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.432, 0.645, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2690", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.099, 0.298, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2691", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.077, 0.386, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2692", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.277, 0.981, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2693", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK\n[button] Arizona -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.712, 0.35, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2694", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.16, 0.85, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2695", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.176, 0.621, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2696", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.185, 0.505, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2697", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.632, 0.109, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2698", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.059, 0.523, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2699", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.339, 0.254, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2700", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK\n[heading] Electric Mile Range -> CLICK\n[checkbox] 300+ Miles (12) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.146, 0.888, 0.167] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_2701", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.373, 0.359, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2702", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.174, 0.26, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2703", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.358, 0.619, 0.388] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_2704", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.27, 0.894, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2705", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK\n[heading] Features -> CLICK\n[label] BBB Rated A+/A -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.479, 0.559, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2706", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.06, 0.468, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2707", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.848, 0.193, 0.908, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2708", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.313, 0.464, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2709", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.158, 0.504, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2710", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.134, 0.312, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2711", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.207, 0.641, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2712", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.646, 0.211, 0.651, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2713", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, Tours \uf078 -> CLICK\n[link] VIP Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.764, 0.788, 0.802] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2714", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.388, 0.365, 0.409] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_2715", "image": {"bytes": "", "path": "./images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the birth place of Ali Wong.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.095, 0.595, 0.106] in the image\nAnd my action is Action: TYPE\nValue: Ali Wong"}]}, {"id": "mind2web_2716", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.452, 0.11, 0.47] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_2717", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.083, 0.745, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2718", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.409, 0.459, 0.431] in the image\nAnd my action is Action: SELECT\nValue: 100 km"}]}, {"id": "mind2web_2719", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.462, 0.942, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2720", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.033, 0.187, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2721", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.054, 0.882, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2722", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.654, 0.062, 0.664] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2723", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.038, 0.43, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2724", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 21 -> CLICK\n[button] April 17, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.276, 0.192, 0.313] in the image\nAnd my action is Action: SELECT\nValue: 2 Guests"}]}, {"id": "mind2web_2725", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: red sox vs yankees\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.646, 0.93, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2726", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.351, 0.29, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2727", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.018, 0.441, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2728", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_14959a6e-68e3-4fb9-a9de-1bae945670d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK\n[p] Choose 3-hour window -> CLICK\n[div] 10am - 1pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.566, 0.17, 0.596] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2729", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.425, 0.686, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2730", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.036, 0.164, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2731", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.36, 0.693, 0.381] in the image\nAnd my action is Action: TYPE\nValue: 04/19/2023"}]}, {"id": "mind2web_2732", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.332, 0.382, 0.359] in the image\nAnd my action is Action: SELECT\nValue: Arizona"}]}, {"id": "mind2web_2733", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.381, 0.595, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2734", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.292, 0.425, 0.324] in the image\nAnd my action is Action: TYPE\nValue: MANILA"}]}, {"id": "mind2web_2735", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.372, 0.648, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2736", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.043, 0.869, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2737", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.419, 0.884, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2738", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.042, 0.321, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2739", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.562, 0.316, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2740", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.026, 0.232, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2741", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.32, 0.256, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2742", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.338, 0.582, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2743", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.236, 0.244, 0.251] in the image\nAnd my action is Action: TYPE\nValue: TEXAS CITY"}]}, {"id": "mind2web_2744", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK\n[label] Private Tour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.147, 0.963, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2745", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK\n[link] Likely To Sell Out -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.293, 0.927, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2746", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.132, 0.44, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2747", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.09, 0.321, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2748", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.072, 0.871, 0.098] in the image\nAnd my action is Action: SELECT\nValue: Price low to high"}]}, {"id": "mind2web_2749", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.182, 0.853, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2750", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.301, 0.459, 0.338] in the image\nAnd my action is Action: TYPE\nValue: 250"}]}, {"id": "mind2web_2751", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana\n[link] Search for \u201cNirvana\u201d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.125, 0.417, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2752", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.388, 0.365, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2753", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_64430af6-f9da-4baa-9f55-d51ca0f50f7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.459, 0.536, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2754", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.013, 0.05, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2755", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_b3736604-cbf4-4e59-aee9-d057d7ef7558.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... -> CLICK\n[link] View All Releases \uedbe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.304, 0.244, 0.335] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_2756", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.147, 0.084, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2757", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.319, 0.86, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2758", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.162, 0.548, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2759", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.435, 0.389, 0.45] in the image\nAnd my action is Action: SELECT\nValue: 3"}]}, {"id": "mind2web_2760", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.847, 0.041, 0.857] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2761", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK\n[button] Deals -> CLICK\n[link] US Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.4, 0.618, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2762", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.485, 0.958, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2763", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.026, 0.829, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2764", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.092, 0.14, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2765", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Done -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.457, 0.263, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2766", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.187, 0.414, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2767", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\n[combobox] Search products and services -> TYPE: rogaine\n[img] Men's Rogaine Extra Strength 5% Minoxidil Solution -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 1.087, 1.0, 1.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2768", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York\n[a] NYC - New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.374, 0.875, 0.408] in the image\nAnd my action is Action: SELECT\nValue: Friday, April 7"}]}, {"id": "mind2web_2769", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.391, 0.492, 0.615] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2770", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.069, 0.327, 0.091] in the image\nAnd my action is Action: TYPE\nValue: Manchester"}]}, {"id": "mind2web_2771", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.384, 0.079, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2772", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.251, 0.11, 0.267] in the image\nAnd my action is Action: SELECT\nValue: 2017"}]}, {"id": "mind2web_2773", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.125, 0.181, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2774", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> CLICK\n[spinbutton] Flight Number (Required) -> TYPE: DL145"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.894, 0.096, 0.934, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2775", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK\n[link] Groups \ue92e -> CLICK\n[span] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.227, 0.254, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2776", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City\n[div] New York City, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.009, 0.781, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2777", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER\n[link] POINTS SHOP -> CLICK\n[link] Avatar -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.507, 0.87, 0.629, 0.891] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2778", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.329, 0.408, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2779", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK\n[textbox] First Name * (required) -> TYPE: Joe\n[textbox] Surname * (required) -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.314, 0.573, 0.345] in the image\nAnd my action is Action: TYPE\nValue: joe@bloggs.com"}]}, {"id": "mind2web_2780", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.07, 0.785, 0.087] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_2781", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 08"}]}, {"id": "mind2web_2782", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK\n[link] 10 (131) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.151, 0.986, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2783", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.483, 1.678, 0.523, 1.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2784", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.072, 0.282, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2785", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.34, 0.597, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2786", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 1.045, 0.713, 1.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2787", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_93b118fc-5d32-48a8-b85c-703862f58792.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK\n[span] Jun 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.244, 0.899, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2788", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.078, 0.925, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2789", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.073, 0.266, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2790", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.431, 0.295, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2791", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.349, 0.236, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2792", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK\n[link] Hammocks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.474, 0.175, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2793", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.766, 0.442, 0.792] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2794", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... 
-> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.414, 0.969, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2795", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.351, 0.418, 0.386] in the image\nAnd my action is Action: TYPE\nValue: Hackney"}]}, {"id": "mind2web_2796", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.636, 0.166, 0.656] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2797", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.188, 0.463, 0.195] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_2798", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 2.276, 0.181, 2.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2799", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: the weeknd"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2800", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.135, 0.687, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2801", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.005, 0.675, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2802", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.481, 0.942, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2803", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... -> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.28, 0.619, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2804", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.144, 0.145, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2805", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[generic] Click to add item \"Tuscany\u00ae Baytown Drop-In 33\" St... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.437, 0.476, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2806", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK\n[link] Filters -> CLICK\n[checkbox] Wi-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.245, 0.357, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2807", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.222, 0.838, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2808", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.377, 0.38, 0.417] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_2809", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.355, 0.639, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2810", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.336, 0.459, 0.364] in the image\nAnd my action is Action: SELECT\nValue: Fair (580-669 FICO\u00ae Score)"}]}, {"id": "mind2web_2811", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.11, 0.273, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2812", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.478, 0.309, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2813", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.266, 0.577, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2814", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.337, 0.517, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2815", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_8dd6021e-c277-488e-bfb9-2e65698d85bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK\n[span] 2 guests -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.608, 0.525, 0.654] in the image\nAnd my action is Action: SELECT\nValue: 1 guest"}]}, {"id": "mind2web_2816", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Miami, FL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.244, 0.871, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2817", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.017, 0.663, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2818", "image": {"bytes": "", "path": "./images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL passing touchdown season stats.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.092, 0.286, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2819", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.612, 0.307, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2820", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK\n[combobox] Select Region Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.415, 0.488, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2821", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK\n[link] 1990s 3,183 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.256, 0.97, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2822", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.015] in the image\nAnd my action is Action: TYPE\nValue: Chill"}]}, {"id": "mind2web_2823", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.026, 0.335, 0.042] in the image\nAnd my action is Action: TYPE\nValue: Hot Dogs"}]}, {"id": "mind2web_2824", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_af5953e2-01e4-4100-bfde-3a72b66535b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.102, 0.145, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_2825", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_ee8707f3-64d9-4ca3-b9d7-12a71897b462.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.656, 0.685, 0.68] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2826", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.098, 0.041, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2827", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.354, 0.446, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2828", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.116, 0.3, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2829", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.13, 0.317, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2830", "image": {"bytes": "", "path": "./images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare Pro Plans with other plans available.\nPrevious actions:\n[link] Try Next Pro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.158, 0.336, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2831", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_23f652b4-d726-4302-9ed4-c78747639b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: adele\n[option] Adele -> CLICK\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.209, 0.792, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2832", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.258, 0.495, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2833", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.168, 0.303, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2834", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.376, 0.375, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2835", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.149, 0.528, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2836", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.352, 0.943, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2837", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.298, 0.148, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2838", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.571, 0.471, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2839", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.439, 0.253, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2840", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.566, 0.683, 0.709, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2841", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.205, 0.244, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2842", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.378, 0.772, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2843", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.212, 0.605, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2844", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.233, 0.096, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2845", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.683, 0.179, 0.734, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2846", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.227, 0.566, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2847", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.26, 0.616, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2848", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... -> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.102, 0.991, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2849", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.279, 0.053, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2850", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.171, 0.925, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2851", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.507, 0.466, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2852", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.475, 0.345, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2853", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[span] Walgreens -> CLICK\n[textbox] Add a title (Required) -> TYPE: Walgreens\n[img] A person holding a tomato and a grocery bag with a... -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.756, 0.193, 0.791] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2854", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_1e9d301f-ab8a-4ee1-8441-3a05220caf6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.47, 0.339, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2855", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.283, 0.253, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2856", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.47, 0.684, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2857", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[p] Women's Sonoma Goods For Life\u00ae All Over Stitch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.519, 0.931, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2858", "image": {"bytes": "", "path": "./images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the score of the 2020 Super Bowl.\nPrevious actions:\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.242, 0.771, 0.271] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_2859", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[input] -> CLICK\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.324, 0.256, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2860", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3\n[textbox] Zip Code -> TYPE: 10001\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.18, 0.429, 0.201] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_2861", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.34, 0.709, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2862", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.028, 0.553, 0.049] in the image\nAnd my action is Action: TYPE\nValue: easter home decor"}]}, {"id": "mind2web_2863", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first\n[div] Request Info -> CLICK\n[span] Send -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.337, 0.682, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2864", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.091, 0.914, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2865", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.264, 0.256, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2866", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[option] Washington District of Columbia,\u00a0United States -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.53, 0.496, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2867", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.276, 0.947, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2868", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.289, 0.984, 0.314] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_2869", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.34, 0.152, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2870", "image": {"bytes": "", "path": "./images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending daily deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.618, 0.073, 0.7, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2871", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.256, 0.841, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2872", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[label] Most popular -> CLICK\n[span] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.643, 0.248, 0.741, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2873", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.271, 0.249, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2874", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.009, 0.157, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2875", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.745, 0.96, 0.778] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2876", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[button] Athens -> CLICK\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.176, 0.5, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2877", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.467, 0.645, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2878", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.01, 0.45, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2879", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.119, 0.777, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2880", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.179, 0.805, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2881", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.117, 0.327, 0.145] in the image\nAnd my action is Action: TYPE\nValue: sheffield"}]}, {"id": "mind2web_2882", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK\n[label] Kids -> CLICK\n[label] Maternity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.686, 0.412, 0.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2883", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.128, 0.672, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2884", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch\n[button] smartwatches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 2.05, 0.032, 2.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2885", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2886", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.066, 0.812, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2887", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_f24491dc-80a1-4824-82cc-67c11157db08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.391, 0.474, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2888", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_38274c62-d229-43da-a57a-32470873c88e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK\n[link] Canada -> CLICK\n[link] King of Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.142, 0.97, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2889", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.06, 0.948, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2890", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2891", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.257, 0.459, 0.283] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_2892", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK\n[p] Remove -> CLICK\n[button] 4K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.798, 0.703, 0.817] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2893", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.079, 0.471, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2894", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.218, 0.986, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2895", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.205, 0.737, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2896", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.128, 0.266, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2897", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.416, 0.284, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2898", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.197, 0.574, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2899", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 2.076, 0.356, 2.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2900", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.175, 0.291, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2901", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba\n[button] SEARCH -> CLICK\n[link] Chords -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.292, 0.971, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2902", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.339, 0.205, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2903", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.313, 0.662, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2904", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.321, 0.7, 0.355] in the image\nAnd my action is Action: SELECT\nValue: 4"}]}, {"id": "mind2web_2905", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\n[a] City Pages -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 1.26, 0.858, 1.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2906", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... 
-> CLICK\n[button] $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.505, 0.654, 0.529] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_2907", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK\n[radio] Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.645, 0.096, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2908", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.53, 0.489, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2909", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.085, 0.653, 0.11] in the image\nAnd my action is Action: SELECT\nValue: \ud83c\uddec\ud83c\udde7 English (UK)"}]}, {"id": "mind2web_2910", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK\n[link] sit on a parent\u2019s lap -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.516, 0.61, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2911", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.257, 0.599, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2912", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.085, 0.287, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2913", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith\n[select] Confirmation Code -> SELECT: Ticket Number\n[textbox] ticket number maxlimit is 13 -> TYPE: 123456780"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.433, 0.281, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2914", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.027, 0.77, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2915", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.052, 0.621, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2916", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.187, 0.375, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2917", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.014, 0.577, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2918", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.314, 0.463, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2919", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK\n[button] Select -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.478, 0.703, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2920", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_01945c7c-6cea-4473-81a8-af3672d0c114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.005, 0.492, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2921", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK\n[link] Teams \ue00d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.381, 0.707, 0.406] in the image\nAnd my action is Action: SELECT\nValue: Spanish LaLiga"}]}, {"id": "mind2web_2922", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK\n[checkbox] Superhero Sci Fi (745) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.238, 0.331, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2923", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.308, 0.694, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2924", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Most Played -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.169, 0.548, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2925", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.256, 0.047, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2926", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Hotels"}]}, {"id": "mind2web_2927", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.215, 0.245, 0.238] in the image\nAnd my action is Action: TYPE\nValue: LAS VEGAS"}]}, {"id": "mind2web_2928", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK\n[label] Rail -> CLICK\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.297, 0.359, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2929", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.571, 0.972, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2930", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307\n[button] 105307 -> CLICK\n[combobox] Change quantity -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.433, 0.351, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2931", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.222, 0.383, 0.241] in the image\nAnd my action is Action: TYPE\nValue: Conductor"}]}, {"id": "mind2web_2932", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] Chicago -> CLICK\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.505, 0.542, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2933", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.253, 0.09, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2934", "image": {"bytes": "", "path": "./images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to answer a question in the home improvement section.\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.094, 0.595, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2935", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.412, 0.248, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2936", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.038, 0.746, 0.055] in the image\nAnd my action is Action: TYPE\nValue: recipe"}]}, {"id": "mind2web_2937", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.284, 0.789, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2938", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.344, 0.36, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2939", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_6cca7b3f-a1f9-42b7-b468-dc7d0dfb93a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.09, 0.16, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2940", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.351, 0.84, 0.383] in the image\nAnd my action is Action: SELECT\nValue: 22"}]}, {"id": "mind2web_2941", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.259, 0.206, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2942", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.722, 0.13, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2943", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.339, 0.881, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2944", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.577, 0.481, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2945", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.429, 0.953, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2946", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.681, 0.47, 0.706] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2947", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[label] Rail -> CLICK\n[label] Express Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.476, 0.848, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2948", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.308, 0.773, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2949", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_8d50111d-41cf-4ccc-ba45-96eaab8eff04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 1.083, 0.758, 1.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2950", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[generic] Click to add item \"Tuscany\u00ae Baytown Drop-In 33\" St... -> CLICK\n[generic] Click to add item \"Dayton Drop-In 25\" Stainless St... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.862, 0.958, 0.883] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2951", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.352, 0.995, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2952", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75\n[span] mm/dd/yyyy-mm/dd/yyyy -> CLICK\n[Abbr] May 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.33, 0.312, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2953", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.212, 0.409, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2954", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.062, 0.931, 0.098] in the image\nAnd my action is Action: TYPE\nValue: Mikal Bridges"}]}, {"id": "mind2web_2955", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.205, 0.807, 0.229] in the image\nAnd my action is Action: TYPE\nValue: North Station"}]}, {"id": "mind2web_2956", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.428, 0.459, 0.456] in the image\nAnd my action is Action: SELECT\nValue: 48 months"}]}, {"id": "mind2web_2957", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_2169ef0b-2186-4604-8416-3775702ad018.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK\n[textbox] Order number EXAMPLES: ECEA12345, 01234567 -> TYPE: 456481897\n[textbox] Email or Phone Number (associated with Order) -> TYPE: 898-448-6474"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.913, 0.241, 0.99, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2958", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.405, 0.408, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2959", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art\n[input] -> CLICK\n[link] Wall Art -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.191, 0.037, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2960", "image": {"bytes": "", "path": "./images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me movies produced by Aaron Horvath.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.006, 0.613, 0.021] in the image\nAnd my action is Action: TYPE\nValue: Aaron Horvath"}]}, {"id": "mind2web_2961", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.278, 0.743, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2962", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.625, 0.068, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2963", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK\n[generic] Brand -> CLICK\n[generic] \ue914 The Ritz-Carlton (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.754, 0.96, 0.786] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2964", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller\n[input] -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.337, 0.123, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2965", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.439, 0.045, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2966", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.529, 0.233, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2967", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com\n[textbox] *Phone Number -> TYPE: 234567890\n[label] I am NOT a Travel Advisor. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.712, 0.544, 0.744] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2968", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_5ed2ca25-c9ae-4888-a3cd-da8c166130fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.316, 0.384, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2969", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.478, 0.482, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2970", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK\n[span] Single Pack -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.559, 0.192, 0.569] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2971", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.407, 0.5, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2972", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! 
-> CLICK\n[link] Rides & Experiences \uf078 -> CLICK\n[link] Shops & Gifts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.278, 0.248, 0.292] in the image\nAnd my action is Action: SELECT\nValue: Gifts"}]}, {"id": "mind2web_2973", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK\n[textbox] Destination -> TYPE: New York\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.346, 0.639, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2974", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.3, 0.153, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2975", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK\n[gridcell] 17 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.604, 0.922, 0.632] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2976", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.232, 0.375, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2977", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James\n[textbox] Message Line 1 -> TYPE: Happy Christmas\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.685, 0.9, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2978", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.266, 0.375, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2979", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[button] add Bottomless Bubbles to the cart with price $9.5... 
-> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe\n[textbox] last-name -> TYPE: Bloggs\n[textbox] booking-number -> TYPE: 101010"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.7, 0.23, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2980", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago\n[option] Chicago, IL -> CLICK\n[button] See next Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.865, 0.965, 0.93] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2981", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.231, 0.715, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2982", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_7e25073e-b49b-49fb-aa04-c4eb651386f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.174, 0.734, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2983", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.433, 0.284, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2984", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.047, 0.546, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2985", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.007, 0.219, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2986", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_d36bbe70-741b-4a62-93a4-1b16f4790520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Where to buy a MetroCard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.409, 0.742, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2987", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.17, 0.677, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Deathloop"}]}, {"id": "mind2web_2988", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.105, 0.377, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2989", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Sun, Jun 4, 2023 -> CLICK\n[div] Start Date -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.182, 0.803, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2990", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.406, 0.645, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2991", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.021, 0.74, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2992", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK\n[gridcell] Mon May 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.39, 0.556, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2993", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.123, 0.695, 0.139] in the image\nAnd my action is Action: TYPE\nValue: Love"}]}, {"id": "mind2web_2994", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.351, 0.543, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2995", "image": {"bytes": "", "path": "./images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the results of the most recent NFL games.\nPrevious actions:\n[link] NFL . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.118, 0.21, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2996", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.131, 0.773, 0.152] in the image\nAnd my action is Action: TYPE\nValue: 99201"}]}, {"id": "mind2web_2997", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.274, 0.359, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2998", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK\n[link] The Weeknd -> CLICK\n[button] Love this track -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.464, 0.104, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2999", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.244, 0.481, 0.277] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_3000", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.216, 0.473, 0.245] in the image\nAnd my action is Action: TYPE\nValue: 25000"}]}, {"id": "mind2web_3001", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.133, 0.139, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3002", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK\n[p] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.198, 0.932, 0.231] in the image\nAnd my action is Action: TYPE\nValue: Black Ops"}]}, {"id": "mind2web_3003", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.285, 0.25, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3004", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? 
-> CLICK\n[div] Things To Do -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.319, 0.89, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3005", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\n[button] change store -> CLICK\n[textbox] Search by Zip Code, City, State, or Landmark -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 0.205, 0.637, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3006", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.202, 0.249, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3007", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.006, 0.144, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3008", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.118, 0.587, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3009", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 1234567890"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.222, 0.647, 0.247] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_3010", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 1.502, 0.147, 1.522] in the image\nAnd my action is Action: TYPE\nValue: 60"}]}, {"id": "mind2web_3011", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.644, 0.037, 0.681] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3012", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.235, 0.914, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3013", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK\n[link] Hammocks -> CLICK\n[link] Camping Hammocks (21) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.233, 0.428, 0.253] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_3014", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.044, 0.407, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3015", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Kayaks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.075, 0.668, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3016", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.332, 0.105, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3017", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.384, 0.571, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3018", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_48468ad4-f291-419b-9089-9ca9d1ffd9da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK\n[label] 4 -> CLICK\n[span] -> CLICK\n[div] Lower -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.922, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3019", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.214, 0.691, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3020", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3021", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.114, 0.788, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3022", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. -> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.121, 0.894, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3023", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.291, 0.831, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3024", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.466, 0.514, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3025", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.294, 0.427, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3026", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.109, 0.469, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3027", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.337, 0.434, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3028", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.537, 0.441, 0.551] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3029", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. -> CLICK\n[link] Gallery View -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.23, 0.774, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3030", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.1, 0.468, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3031", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.344, 0.643, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3032", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543\n[input] -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.104, 0.851, 0.141] in the image\nAnd my action is Action: TYPE\nValue: Green"}]}, {"id": "mind2web_3033", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.155, 0.443, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3034", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.283, 0.721, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3035", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.341, 0.523, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3036", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.024, 0.274, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3037", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: Ballet\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.132, 0.569, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3038", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.284, 0.212, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3039", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.024, 0.464, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3040", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.266, 0.328, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3041", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/23/2023\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.184, 0.357, 0.202] in the image\nAnd my action is Action: SELECT\nValue: Total travel time"}]}, {"id": "mind2web_3042", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.004, 0.651, 0.018] in the image\nAnd my action is Action: TYPE\nValue: dog food"}]}, {"id": "mind2web_3043", "image": {"bytes": "", "path": "./images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter search results for guitar tabs to only show songs with a difficulty rating of \"Beginner\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3044", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.121, 0.659, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3045", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.061, 0.882, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3046", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Athens -> CLICK\n[button] Go! -> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.287, 0.616, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3047", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.198, 0.493, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3048", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.479, 0.772, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3049", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.404, 0.566, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3050", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.029, 0.036, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3051", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.347, 0.577, 0.363] in the image\nAnd my action is Action: SELECT\nValue: 1995"}]}, {"id": "mind2web_3052", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.415, 0.196, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3053", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.007, 0.613, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3054", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_197af70a-27ca-4bb0-bbfd-8374dcbca041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.219, 0.389, 0.25] in the image\nAnd my action is Action: TYPE\nValue: Abbotsford"}]}, {"id": "mind2web_3055", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.049, 0.455, 0.096] in the image\nAnd my action is Action: SELECT\nValue: Tue, Mar 28"}]}, {"id": "mind2web_3056", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Portland -> CLICK\n[span] Pick a date... 
-> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.375, 0.95, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3057", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_7581d740-729b-401a-9625-340b89af3dfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.366, 0.245, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3058", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.681, 0.07, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3059", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.878, 0.263, 1.885] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3060", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.888, 0.262, 0.959, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3061", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.4, 0.518, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3062", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.207, 0.702, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3063", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.173, 0.244, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3064", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_38d7f841-8b1f-4c5b-8131-310960d438fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.096, 0.628, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3065", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.097, 0.123, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3066", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.213, 0.322, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3067", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.646, 0.33, 0.665] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3068", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[span] Reservations -> CLICK\n[textbox] Near -> TYPE: SAN FRANCISCO\n[span] San Francisco -> CLICK\n[checkbox] Cocktail Bars -> CLICK\n[checkbox] Outdoor Seating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.128, 0.635, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3069", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.071, 0.664, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3070", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.232, 0.354, 0.266] in the image\nAnd my action is Action: TYPE\nValue: TOKYO"}]}, {"id": "mind2web_3071", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.007, 0.858, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3072", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.33, 0.675, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3073", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_74dc5d68-9acc-4106-936e-cba53c2782cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.004, 0.371, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3074", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)\n[combobox] Term Length -> SELECT: 48 months"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.404, 0.835, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3075", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.373, 0.316, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3076", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.598, 0.305, 0.699] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3077", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.399, 0.535, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3078", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.011, 0.553, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3079", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.378, 0.205, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3080", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.107, 0.969, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3081", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_729f0caa-03f2-413f-a4f0-c17602d24653.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[textbox] From , required. 
-> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.459, 0.536, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3082", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b2516cbd-9d43-4c25-b114-d8edc450e95c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.381, 0.915, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3083", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3084", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.081, 0.808, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3085", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.257, 0.78, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3086", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\n[span] Help -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.088, 0.756, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3087", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.399, 0.648, 0.432] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_3088", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.364, 0.553, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3089", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_4d38e663-23c7-43da-9cf4-3667c1872ff3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.548, 0.339, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3090", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.156, 0.643, 0.185] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_3091", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.289, 0.591, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3092", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue\n[textbox] * Required Fields House Number -> TYPE: 4847\n[textbox] * Required Fields ZIP Code * Required Fields Zip -> TYPE: 10019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.28, 0.476, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3093", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.619, 0.455, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3094", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_de372882-c37b-4261-a0b7-a234e29456c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.314, 0.616, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3095", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_427fc741-820d-4659-b46e-ba46fa397047.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.255, 0.285, 0.301] in the image\nAnd my action is Action: TYPE\nValue: 1234567890123"}]}, {"id": "mind2web_3096", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.388, 0.21, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3097", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.255, 0.223, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3098", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.023, 0.821, 0.038] in the image\nAnd my action is Action: TYPE\nValue: organic dog food"}]}, {"id": "mind2web_3099", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_697b485d-dfb7-4825-8e41-6d0fffe55a76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.456, 0.038, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3100", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.503, 0.165, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3101", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_941c96f6-80a7-4f83-bcdd-a4c57106ddc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3102", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.837, 0.048, 0.897, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3103", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.13, 0.517, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3104", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple\n[span] Kashi Vishwanath Temple, Varanasi, Uttar Pradesh, ... -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.281, 0.571, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3105", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK\n[span] Digital -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.306, 0.825, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3106", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.657, 0.004, 0.761, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3107", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.455, 0.459, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3108", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.478, 0.985, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3109", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.185, 0.463, 0.191] in the image\nAnd my action is Action: TYPE\nValue: Dubai"}]}, {"id": "mind2web_3110", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.181, 0.891, 0.216] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_3111", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fare calculator.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.081, 0.582, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3112", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\n[combobox] Search products and services -> TYPE: dayquil\n[button] Submit search -> CLICK\n[div] Vicks DayQuil SEVERE Cough, Cold & Flu Relief, 24 ... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 1.103, 0.362, 1.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3113", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book an indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.114, 0.987, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3114", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.179, 0.838, 0.215] in the image\nAnd my action is Action: TYPE\nValue: 5"}]}, {"id": "mind2web_3115", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. -> CLICK\n[link] Gallery View -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 5.066, 0.4, 5.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3116", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.04, 0.409, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3117", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.133, 0.44, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3118", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.19, 0.384, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3119", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.126, 0.812, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3120", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK\n[link] Recommendations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.248, 0.565, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3121", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Spain"}]}, {"id": "mind2web_3122", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corolla, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.216, 0.712, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3123", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.036, 0.646, 0.049] in the image\nAnd my action is Action: TYPE\nValue: xbox 360 games"}]}, {"id": "mind2web_3124", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... -> CLICK\n[link] Go to route -> CLICK\n[button] City Point -> CLICK\n[menuitem] City Point typical route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.67, 0.393, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3125", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career opportunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.574, 0.015, 0.596, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3126", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.097, 0.961, 0.132] in the image\nAnd my action is Action: TYPE\nValue: 11231"}]}, {"id": "mind2web_3127", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.297, 0.459, 0.325] in the image\nAnd my action is Action: SELECT\nValue: New Jersey"}]}, {"id": "mind2web_3128", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.209, 0.269, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3129", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.198, 0.71, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3130", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.214, 0.902, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3131", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.067, 0.587, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3132", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_3133", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK\n[button] View Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.338, 0.394, 0.361] in the image\nAnd my action is Action: TYPE\nValue: FREESHIP3093"}]}, {"id": "mind2web_3134", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.781, 0.094, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3135", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK\n[link] History -> CLICK\n[button] Clear all history -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.207, 0.684, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3136", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3137", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.015, 0.988, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3138", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Tomatometer -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.477, 0.798, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3139", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.543, 0.492, 0.564] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_3140", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.385, 0.83, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3141", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK\n[link] Under 2'x3' (38) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.238, 0.986, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3142", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK\n[link] Accessibility -> CLICK\n[div] List of Accessible Stations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.099, 0.403, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3143", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_ee8846f3-42cd-4257-9f4e-011b4079bf1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.236, 0.744, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3144", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK\n[div] This weekend -> CLICK\n[p] Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.478, 0.242, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3145", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_146406e2-e394-4d53-8e79-f20f4d7c3df0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.005, 0.371, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3146", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Under Armour Men's Charged Assert 9 Running Shoe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.781, 0.549, 0.81] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3147", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.326, 0.144, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3148", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.117, 0.673, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3149", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.027, 0.232, 0.043] in the image\nAnd my action is Action: TYPE\nValue: Burgers"}]}, {"id": "mind2web_3150", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.018, 0.469, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3151", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.145, 0.587, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3152", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_9174deb2-b1a0-46a7-b79b-8265bbbad507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK\n[span] Toll Rates -> CLICK\n[span] Motorcycles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.262, 0.867, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3153", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.413, 0.47, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3154", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_b368cde5-79cc-412f-89d5-579b80c8db94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.432, 0.252, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3155", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789\n[combobox] Expiration month \u00a0* -> TYPE: 01\n[combobox] Expiration year \u00a0* -> SELECT: 2024"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.675, 0.302, 0.689] in the image\nAnd my action is Action: TYPE\nValue: 123"}]}, {"id": "mind2web_3156", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.512, 0.329, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3157", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.106, 0.494, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3158", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.443, 0.605, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3159", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA\n[div] Goa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.404, 0.734, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3160", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.578, 0.052, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3161", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.142, 0.472, 0.184] in the image\nAnd my action is Action: SELECT\nValue: Kia"}]}, {"id": "mind2web_3162", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.202, 0.241, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3163", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.374, 0.452, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3164", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.274, 0.592, 0.301] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_3165", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[span] Dining package -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.91, 0.713, 0.954] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3166", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_90db0fb9-3240-4db9-bd1f-ee23ceb7ea76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.146, 0.695, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3167", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.67, 0.263, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3168", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.093, 0.326, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3169", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.296, 0.173, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3170", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_94853346-3f50-42d4-a572-19b457b58ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.221, 0.254, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3171", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.374, 0.433, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3172", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK\n[link] Scores -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.365, 0.717, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3173", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.023, 0.665, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3174", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_b36f6cf0-ceff-4a01-bc4e-bf8ffa893d48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.142, 0.45, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3175", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.249, 0.498, 0.288] in the image\nAnd my action is Action: TYPE\nValue: 123 Main St, West Chicago, IL"}]}, {"id": "mind2web_3176", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.581, 0.366, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3177", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.061, 0.123, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3178", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.064, 0.518, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3179", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.613, 0.278, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3180", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.837, 0.039, 0.897, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3181", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_1be05128-edbd-43c1-b205-2923b0b3b75d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? -> CLICK\n[link] Read more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.364, 0.218, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3182", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[textbox] Ex. 
Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar\n[button] CONTINUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.427, 0.357, 0.459, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3183", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.009, 0.553, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3184", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.044, 0.615, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3185", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... -> TYPE: 12345678\n[textbox] First name -> TYPE: Jason\n[textbox] Last name -> TYPE: Two"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.339, 0.271, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3186", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK\n[radio] $100 to $200 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.445, 0.123, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3187", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.259, 0.359, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3188", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.036, 0.646, 0.049] in the image\nAnd my action is Action: TYPE\nValue: playstation gift card $10"}]}, {"id": "mind2web_3189", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... 
-> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.473, 0.868, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3190", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK\n[option] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.341, 0.906, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3191", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.179, 0.554, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3192", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.359, 0.567, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3193", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.358, 0.443, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3194", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 2.133, 0.479, 2.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3195", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_f63f307f-8cac-4289-9eb1-bfed085a6f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK\n[img] Image for Red Hot Chili Peppers 2023 Tour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.209, 0.372, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3196", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3197", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.167, 0.964, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3198", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK\n[input] -> CLICK\n[span] -> CLICK\n[button] Find Your Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.381, 0.915, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3199", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.19, 0.429, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3200", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.383, 0.256, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3201", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK\n[link] Outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.808, 0.233, 0.852] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3202", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.79, 0.492, 0.838] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3203", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.116, 0.481, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3204", "image": {"bytes": "", "path": "./images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse recent NFL-related videos.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.112, 0.588, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3205", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.306, 0.963, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3206", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.025, 0.232, 0.039] in the image\nAnd my action is Action: TYPE\nValue: spa"}]}, {"id": "mind2web_3207", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK\n[span] Plastic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.322, 0.169, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3208", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK\n[label] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.979, 0.134, 1.001] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3209", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.03, 0.426, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3210", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_2f9858fb-e872-4568-b349-3391628deb20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York City\n[li] New York City, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.441, 0.397, 0.454] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_3211", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.558, 0.273, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3212", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.5, 0.435, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3213", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.14, 0.326, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3214", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. 
-> CLICK\n[img] Short Socks (2 Pairs) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.388, 0.248, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3215", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.149, 0.713, 0.166] in the image\nAnd my action is Action: TYPE\nValue: Paris Charles de Gaulle Airport (CDG)"}]}, {"id": "mind2web_3216", "image": {"bytes": "", "path": "./images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the push notification settings\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.511, 0.241, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3217", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.371, 0.48, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3218", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK\n[link] Navigate to 1-Night Kiosk Rentals See More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.206, 0.333, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3219", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: Glasgow\n[span] Glasgow Central -> CLICK\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.353, 0.412, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3220", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA\n[input] -> TYPE: Walker\n[input] -> TYPE: A987654"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.416, 0.585, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3221", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.079, 0.342, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3222", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds\n[div] Alien Worlds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.191, 0.189, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3223", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.627, 0.277, 0.641] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3224", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.017, 0.567, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3225", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.361, 0.285, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3226", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK\n[button] 25 -> CLICK\n[combobox] Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.37, 0.894, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3227", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.292, 1.0, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3228", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.228, 0.196, 0.248] in the image\nAnd my action is Action: SELECT\nValue: US$20 to US$40"}]}, {"id": "mind2web_3229", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.085, 0.127, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3230", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.379, 0.483, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3231", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.624, 0.304, 0.658, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3232", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_20b8aff4-4269-4cf8-a6a7-d1232bab53a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.161, 0.984, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3233", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.402, 0.209, 0.445] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_3234", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK\n[input] -> TYPE: KCCR"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.051, 0.576, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3235", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.204, 0.843, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3236", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.008, 0.686, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3237", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.954, 0.777, 0.978] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3238", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.775, 0.316, 0.793] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3239", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.507, 0.155, 0.663, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3240", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.214, 0.209, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3241", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.553, 0.32, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3242", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.163, 0.432, 0.202] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_3243", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.464, 0.391, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3244", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.31, 0.643, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3245", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 100\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.548, 0.701, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3246", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.084, 0.223, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3247", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.167, 0.414, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3248", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.301, 0.913, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3249", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.698, 0.802, 0.718] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3250", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 2.797, 0.396, 2.821] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3251", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.255, 0.691, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3252", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.762, 0.988, 0.791] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3253", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK\n[textbox] max price $ -> TYPE: 100\n[div] Shared room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.688, 0.786, 0.722] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3254", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.32, 0.62, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3255", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.412, 0.686, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3256", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Rail -> CLICK\n[label] Express Bus -> CLICK\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.373, 0.359, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3257", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.008, 0.323, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3258", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.21, 0.516, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3259", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.03, 0.987, 0.057] in the image\nAnd my action is Action: TYPE\nValue: Alinea"}]}, {"id": "mind2web_3260", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_dd3fda05-b84a-42a1-92dc-f7f60043d557.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK\n[textbox] Max Price -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.435, 0.826, 0.468] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_3261", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.25, 0.359, 0.277] in the image\nAnd my action is Action: TYPE\nValue: central park zoo"}]}, {"id": "mind2web_3262", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.32, 0.285, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3263", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles\n[span] City -> CLICK\n[div] 21 -> CLICK\n[div] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.161, 0.964, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3264", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.141, 0.463, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3265", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.318, 0.359, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3266", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.219, 0.263, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3267", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.608, 0.546, 0.654] in the image\nAnd my action is Action: SELECT\nValue: 4 guests"}]}, {"id": "mind2web_3268", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 2.96, 0.192, 2.968] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3269", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.112, 0.019, 0.205, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3270", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.056, 0.72, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3271", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.182, 0.153, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3272", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.236, 0.037, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3273", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.416, 0.521, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3274", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? 
-> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK\n[link] Unique Experiences -> CLICK\n[label] Up to 1 hour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.636, 0.236, 0.652] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3275", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.148, 0.89, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3276", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.355, 0.777, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3277", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.272, 0.891, 0.304] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_3278", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_18ad1432-a6eb-46c8-a4d0-b27f35971f9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.832, 0.473, 0.879] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3279", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.031, 0.036, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3280", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.23, 0.241, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3281", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_78240913-e4ba-488d-af47-988d6dcd7307.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.405, 0.922, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3282", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.345, 0.639, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3283", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.195, 0.59, 0.241] in the image\nAnd my action is Action: TYPE\nValue: Dubai"}]}, {"id": "mind2web_3284", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.467, 0.645, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3285", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK\n[button] TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.797, 0.639, 0.828] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3286", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[img] Woods 41366 Surge Protector with Overload Safety F... 
-> CLICK\n[span] Qty: -> CLICK\n[option] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.389, 0.869, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3287", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.198, 0.366, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3288", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.775, 0.118, 0.79] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3289", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.1, 0.492, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3290", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK\n[listbox] hour -> SELECT: 17"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3291", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.779, 0.941, 0.811] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3292", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.06, 0.307, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3293", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... 
-> CLICK\n[link] View All Releases \uedbe -> CLICK\n[combobox] Select year: -> SELECT: 2020"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.261, 0.48, 0.288] in the image\nAnd my action is Action: SELECT\nValue: Press Releases"}]}, {"id": "mind2web_3294", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.618, 0.307, 0.648] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3295", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.329, 0.234, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3296", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_76aa8b0d-7681-46b5-983a-c715bbec237c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK\n[button] Continue -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.621, 0.727, 0.766] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3297", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 1.384, 0.634, 1.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3298", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.404, 0.393, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3299", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.435, 0.286, 0.457] in the image\nAnd my action is Action: SELECT\nValue: Good"}]}, {"id": "mind2web_3300", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? 
-> TYPE: Hotels\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Mexico"}]}, {"id": "mind2web_3301", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.482, 0.474, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3302", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.626, 0.648, 0.645] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3303", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.485, 0.552, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3304", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.054, 0.634, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3305", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.595, 0.691, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3306", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.167, 0.46, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3307", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.297, 0.715, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3308", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.645, 0.302, 0.66] in the image\nAnd my action is Action: TYPE\nValue: 01"}]}, {"id": "mind2web_3309", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.119, 0.777, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3310", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.186, 0.025, 0.288, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3311", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.273, 0.237, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3312", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.419, 0.184, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3313", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Cyberpunk 2077"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.255, 0.677, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3314", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: belo horizonte"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.329, 0.342, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3315", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.03, 0.267, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3316", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.15, 0.094, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3317", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.25, 0.446, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3318", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_3e990bc8-0831-405f-89e7-0d2c621e5bb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.306, 0.195, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3319", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_d8ebd628-85a8-41ba-a20b-8d10222703e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.207, 0.837, 0.231] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_3320", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.487, 0.605, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3321", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_1dbf3df0-7bb2-4b4d-bf1b-108692b3387d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK\n[span] Filters -> CLICK\n[listbox] Sort by Price - Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.855, 0.976, 0.896] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3322", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.0, 0.552, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3323", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.543, 0.194, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3324", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK\n[textbox] First Name (required) -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.359, 0.833, 0.389] in the image\nAnd my action is Action: TYPE\nValue: Buck"}]}, {"id": "mind2web_3325", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.281, 0.514, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3326", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK\n[div] Sail From -> CLICK\n[button] Miami, FL -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.196, 0.551, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3327", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_f1bd2a11-0430-4a4a-a850-5d4f2a0509bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford\n[div] Abbotsford -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.212, 0.98, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3328", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.272, 0.175, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3329", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.035, 0.917, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3330", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_05089a33-1242-46b1-add7-bd4eb35abc03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[heading] Continue -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[span] 36 -> CLICK\n[button] 34 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.626, 0.774, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3331", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 1.155, 0.233, 1.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3332", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.512, 0.759, 0.54] in the image\nAnd my action is Action: TYPE\nValue: Harry Potter Box Set"}]}, {"id": "mind2web_3333", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.411, 0.797, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3334", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.237, 0.318, 0.268] in the image\nAnd my action is Action: TYPE\nValue: albany"}]}, {"id": "mind2web_3335", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... -> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.67, 0.34, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3336", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.063, 0.644, 0.094] in the image\nAnd my action is Action: TYPE\nValue: iPhone 12 Pro"}]}, {"id": "mind2web_3337", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[generic] Your Opinion Counts! -> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.188, 0.621, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3338", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\n[textbox] Where to? -> CLICK\n[button] Nearby -> CLICK\n[gridcell] Thu Mar 30 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.465, 0.78, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3339", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.128, 0.695, 0.145] in the image\nAnd my action is Action: TYPE\nValue: Top Hip Hop"}]}, {"id": "mind2web_3340", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK\n[link] COVID-19 testing Schedule a COVID-19 test -> CLICK\n[textbox] Where do you live? (required) -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.271, 0.844, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3341", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.048, 0.546, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3342", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.737, 0.481, 0.744] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3343", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.48, 0.245, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3344", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.345, 0.243, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3345", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 
22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.227, 0.35, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3346", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... -> CLICK\n[textbox] Search by Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.311, 0.688, 0.32] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_3347", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK\n[combobox] Platform -> SELECT: PS5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.107, 0.737, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3348", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_56836838-03d7-449e-b87f-37ea90bf16fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.301, 0.384, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_3349", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_77c05c0f-315a-4014-bfcd-4943c731b855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.457, 0.486, 0.483] in the image\nAnd my action is Action: TYPE\nValue: BOSTON NAVY YARD"}]}, {"id": "mind2web_3350", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 1.79, 0.727, 1.811] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3351", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\n[button] Savings & Memberships -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.194, 0.638, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3352", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.308, 0.808, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3353", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.201, 0.524, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3354", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK\n[img] Men's UA Surge 3 Running Shoes -> CLICK\n[button] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.544, 0.952, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3355", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.129, 0.441, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3356", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.556, 0.349, 0.654] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3357", "image": {"bytes": "", "path": "./images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Remove the SSD on my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.011, 0.984, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3358", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.392, 0.192, 0.436] in the image\nAnd my action is Action: SELECT\nValue: 2016"}]}, {"id": "mind2web_3359", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[link] Amazon Basics 7-Piece Lightweight Microfiber Bed-i... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.342, 0.765, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3360", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[checkbox] Superhero Sci Fi (745) -> CLICK\n[checkbox] Based On Comic Book (226) -> CLICK\n[strong] IMDb Rating -> CLICK\n[group] IMDb user rating (average) -> SELECT: 7.0\n[group] IMDb user rating (average) -> SELECT: 9.0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.163, 0.182, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3361", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.535, 0.216, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3362", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.516, 0.487, 0.527] in the image\nAnd my action is Action: TYPE\nValue: Honolulu"}]}, {"id": "mind2web_3363", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.513, 0.194, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3364", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.305, 0.748, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3365", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.475, 0.195, 0.51] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_3366", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: musical instruments"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.239, 0.643, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3367", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.801, 0.456, 0.92, 0.476] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_3368", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.038, 0.937, 0.061] in the image\nAnd my action is Action: TYPE\nValue: jaguar xf"}]}, {"id": "mind2web_3369", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.686, 0.295, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3370", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.252, 0.32, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3371", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.52, 0.238, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3372", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.001, 0.417, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3373", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.36, 0.492, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3374", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.098, 0.627, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3375", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.463, 0.958, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3376", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.256, 0.562, 0.275] in the image\nAnd my action is Action: SELECT\nValue: Business or First"}]}, {"id": "mind2web_3377", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.031, 0.228, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3378", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_d071fca1-24f8-460f-8e33-f6d24a8f651e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.683, 0.631, 0.716] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_3379", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.318, 0.424, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3380", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.19, 0.782, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3381", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.285, 0.71, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3382", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK\n[gridcell] 17 April 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.383, 0.94, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3383", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK\n[link] Developer/ Senior Developer, IT -> CLICK\n[spinbutton] Select how often (in days) to receive an alert: -> TYPE: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.079, 0.463, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3384", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK\n[link] Opel Insignia\u00a0or Similar , View deal -> CLICK\n[radio] Radio yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 1.669, 0.93, 1.697] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3385", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.013, 0.546, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3386", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.268, 0.918, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3387", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.218, 0.599, 0.247] in the image\nAnd my action is Action: TYPE\nValue: 90012"}]}, {"id": "mind2web_3388", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.351, 0.271, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3389", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.282, 0.981, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3390", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.008, 0.686, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3391", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.24, 0.613, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3392", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.088, 0.233, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3393", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.382, 0.163, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3394", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 1.757, 0.909, 1.764] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3395", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\n[button] Prescriptions -> CLICK\n[link] Pharmacy & Health Rewards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.16, 0.418, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3396", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.714, 0.349, 0.722] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3397", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_224a759b-aa9a-4cee-8cea-36a955e2ce76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK\n[textbox] KOA Rewards Number -> TYPE: 1000000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.431, 0.926, 0.466] in the image\nAnd my action is Action: TYPE\nValue: 10023"}]}, {"id": "mind2web_3398", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK\n[button] Release Date -> CLICK\n[button] View Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.647, 0.466, 0.662] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3399", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.378, 0.871, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3400", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.223, 0.755, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3401", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.022, 0.456, 0.049] in the image\nAnd my action is Action: TYPE\nValue: thai"}]}, {"id": "mind2web_3402", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.154, 0.945, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3403", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.358, 0.268, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3404", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.104, 0.562, 0.14] in the image\nAnd my action is Action: TYPE\nValue: DL145"}]}, {"id": "mind2web_3405", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.386, 0.716, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3406", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.191, 0.249, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3407", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.423, 0.203, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3408", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.335, 0.927, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3409", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.386, 0.17, 0.405] in the image\nAnd my action is Action: SELECT\nValue: UEFA Champions League"}]}, {"id": "mind2web_3410", "image": {"bytes": "", "path": "./images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me FAQs related to eating and drinks\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.103, 0.286, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3411", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.242, 0.291, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3412", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.065, 0.668, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3413", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.624, 0.116, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3414", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.448, 0.31, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3415", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK\n[textbox] Search by game -> TYPE: Dota 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.27, 0.992, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3416", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.083, 0.174, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3417", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.459, 0.691, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3418", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.291, 0.966, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3419", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.464, 0.335, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3420", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.512, 0.278, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3421", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.087, 0.206, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3422", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: San Francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.263, 0.247, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3423", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.179, 0.777, 0.214] in the image\nAnd my action is Action: TYPE\nValue: Johnson"}]}, {"id": "mind2web_3424", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.273, 0.278, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3425", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.121, 0.547, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3426", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.306, 0.804, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3427", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_471e7745-222e-40b2-a20f-c65fc40e098a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\n[link] Shots -> CLICK\n[button] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.149, 0.852, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3428", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.665, 0.93, 0.691] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3429", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.09, 0.327, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3430", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.293, 0.3, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3431", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6\n[combobox] Select Year -> SELECT: 2020\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.308, 0.301, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3432", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.343, 0.106, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3433", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.103, 0.766, 0.121] in the image\nAnd my action is Action: TYPE\nValue: dallas"}]}, {"id": "mind2web_3434", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.059, 0.563, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3435", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.194, 0.326, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3436", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9d1d6dc3-2184-4ded-84ea-77035eeb1a7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.628, 0.795, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3437", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.258, 0.898, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3438", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.43, 0.358, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3439", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.324, 0.618, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3440", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_249650fb-199e-4ea7-b79f-6dfe0e204f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.233, 0.5, 0.262] in the image\nAnd my action is Action: TYPE\nValue: 1000000000000000"}]}, {"id": "mind2web_3441", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 1.063, 0.147, 1.077] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_3442", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.177, 0.271, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3443", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_f77042ab-abf2-495d-9ad1-b4d23d272cde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: Tsiakkos & Charcoal\n[heading] Tsiakkos & Charcoal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.893, 0.292, 0.977, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3444", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.13, 0.723, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3445", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.169, 0.294, 0.196] in the image\nAnd my action is Action: TYPE\nValue: 60538"}]}, {"id": "mind2web_3446", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.166, 0.931, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3447", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.56, 0.173, 0.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3448", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK\n[img] Men's UA Surge 3 Running Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.377, 0.924, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3449", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.052, 0.652, 0.077] in the image\nAnd my action is Action: TYPE\nValue: vintage clothing"}]}, {"id": "mind2web_3450", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.256, 0.139, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3451", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Tomatometer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.372, 0.794, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3452", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.573, 0.304, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3453", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.088, 0.581, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3454", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.366, 0.882, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3455", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? 
-> TYPE: Aquarium of Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.195, 0.566, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3456", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.162, 0.397, 0.201] in the image\nAnd my action is Action: TYPE\nValue: Birmingham"}]}, {"id": "mind2web_3457", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.656, 0.267, 0.67] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3458", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.786, 0.089, 0.793] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3459", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.128, 0.734, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3460", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.49, 0.375, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3461", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.345, 0.733, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3462", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.351, 0.376, 0.383] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_3463", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK\n[button] Connections \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.641, 0.98, 0.875, 1.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3464", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Hiking Footwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.998, 0.142, 1.008] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3465", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[div] 2 -> CLICK\n[button] Search -> CLICK\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.06, 0.899, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3466", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.27, 0.318, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3467", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.068, 0.387, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3468", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK\n[button] View Cart & Checkout -> CLICK\n[textbox] Add Kohl's Cash or Coupons -> TYPE: FREESHIP3093"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.369, 0.529, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3469", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.127, 0.695, 0.141] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_3470", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.051, 0.243, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3471", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.395, 0.627, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3472", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.279, 0.712, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3473", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.329, 0.512, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3474", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.352, 0.323, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3475", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.245, 0.397, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3476", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100\n[button] Submit price range -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.193, 0.905, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3477", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK\n[link] Cropped Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.165, 0.959, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3478", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.041, 0.646, 0.055] in the image\nAnd my action is Action: TYPE\nValue: kinect camera"}]}, {"id": "mind2web_3479", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.24, 0.215, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3480", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.569, 0.307, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3481", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.449, 0.195, 0.482] in the image\nAnd my action is Action: TYPE\nValue: 12"}]}, {"id": "mind2web_3482", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.229, 0.73, 0.259] in the image\nAnd my action is Action: TYPE\nValue: Times Square"}]}, {"id": "mind2web_3483", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[link] Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.058, 0.301, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3484", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK\n[link] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.281, 0.316, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3485", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.231, 0.927, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3486", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.467, 0.069, 0.475] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_3487", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_36665f93-2ffe-41d3-9c44-bffa85122390.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.506, 0.864, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3488", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.206, 0.613, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3489", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.666, 0.253, 0.705] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3490", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_c3e9825f-6e7b-4c76-b9a2-2fd62f64a14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... 
-> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.73, 0.209, 0.744] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3491", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees\n[option] New York Yankees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.407, 0.941, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3492", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] African -> CLICK\n[link] East Village (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.499, 0.207, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3493", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[button] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.472, 0.09, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3494", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.299, 0.233, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3495", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.216, 0.156, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3496", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.066, 1.051, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3497", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.344, 0.28, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3498", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.512, 0.917, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3499", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.139, 0.16, 0.177, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3500", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3501", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.499, 0.253, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3502", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.134, 0.782, 0.16] in the image\nAnd my action is Action: TYPE\nValue: Ballet"}]}, {"id": "mind2web_3503", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Rap / Hip Hop -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.273, 0.881, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3504", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.345, 0.782, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3505", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.196, 0.715, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3506", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.228, 0.644, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3507", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.224, 0.196, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3508", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK\n[combobox] Select Region Type -> CLICK\n[option] Domestic & North America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.498, 0.565, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3509", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK\n[img] Submit Search -> CLICK\n[link] View KCMA Airport Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.087, 0.353, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3510", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[textbox] Enter zip code or location. Please enter a valid l... -> TYPE: 90028\n[img] -> CLICK\n[generic] 6201 Hollywood Blvd., Suite 126 -> CLICK\n[button] In Stock at 6201 Hollywood Blvd., Suite 126. 0.2 m... 
-> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.273, 0.812, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3511", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_1d564585-6725-42ae-ab43-5203aab4ae39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.424, 0.702, 0.472] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_3512", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.126, 0.273, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3513", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.63, 0.089, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3514", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.121, 0.74, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3515", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.008, 0.348, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3516", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.223, 0.568, 0.241] in the image\nAnd my action is Action: TYPE\nValue: Dallas"}]}, {"id": "mind2web_3517", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.335, 0.14, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3518", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.048, 0.243, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3519", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_8810aa59-ef59-41eb-9dd2-4f79b1c8262f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.366, 0.168, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3520", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01\n[combobox] Year -> SELECT: 2023\n[textbox] CVV -> TYPE: 123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.507, 0.95, 0.541] in the image\nAnd my action is Action: TYPE\nValue: joe bloggs"}]}, {"id": "mind2web_3521", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK\n[label] Kids -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.71, 0.412, 0.723] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3522", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James\n[textbox] Message: 200 characters remaining -> TYPE: Congrats on your new home."}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.701, 0.789, 0.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3523", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. -> CLICK\n[textbox] * Amount: -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.543, 0.956, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3524", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.18, 0.359, 0.197] in the image\nAnd my action is Action: TYPE\nValue: little caribbean"}]}, {"id": "mind2web_3525", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.033, 0.645, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3526", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.198, 0.932, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3527", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.037, 0.722, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3528", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.308, 0.582, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3529", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.194, 0.364, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3530", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.31, 0.63, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3531", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.149, 0.782, 0.177] in the image\nAnd my action is Action: TYPE\nValue: los angeles kings"}]}, {"id": "mind2web_3532", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK\n[link] Book Now\ue90b -> CLICK\n[button] 12:30 PM Outdoor Counter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.381, 0.523, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3533", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... -> CLICK\n[div] Price Alert -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.317, 0.495, 0.339] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_3534", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Ann Arbor\n[span] Ann Arbor, MI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.333, 0.831, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3535", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.026, 0.284, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3536", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.288, 0.359, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3537", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.234, 0.92, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3538", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250\n[textbox] Down Payment -> TYPE: 3000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.293, 0.459, 0.321] in the image\nAnd my action is Action: SELECT\nValue: Tennessee"}]}, {"id": "mind2web_3539", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.032, 0.564, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3540", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.438, 0.821, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3541", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] Free to home or store -> CLICK\n[button] Back to all categories -> CLICK\n[switch] COMPARE -> CLICK\n[path] -> CLICK\n[button] Add to Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.162, 0.899, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3542", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.23, 0.688, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3543", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.449, 0.863, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3544", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.221, 0.249, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3545", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: musical instruments\n[option] musical instruments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.031, 0.662, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3546", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.282, 0.712, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3547", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.25, 0.574, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3548", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.464, 0.699, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3549", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[span] -> CLICK\n[checkbox] Private Lot -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.608, 0.688, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3550", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.155, 0.964, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3551", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. 
-> CLICK\n[li] Neighborhood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.461, 0.171, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3552", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK\n[span] Price -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.786, 0.806, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3553", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_fc9e7790-8bb2-4915-ab1b-3e51b7c79a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.27, 0.691, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3554", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.326, 0.462, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3555", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK\n[combobox] How old are you? -> SELECT: 18-25"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.138, 0.626, 0.149] in the image\nAnd my action is Action: SELECT\nValue: Couple"}]}, {"id": "mind2web_3556", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.739, 0.161, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3557", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.167, 0.326, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3558", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.447, 0.686, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3559", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.13, 0.374, 0.143] in the image\nAnd my action is Action: TYPE\nValue: TEL AVIV"}]}, {"id": "mind2web_3560", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK\n[link] Tab -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.258, 0.97, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3561", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.004, 0.51, 0.023] in the image\nAnd my action is Action: TYPE\nValue: king of tokyo"}]}, {"id": "mind2web_3562", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.583, 0.916, 0.599] in the image\nAnd my action is Action: TYPE\nValue: Stuart Bloom"}]}, {"id": "mind2web_3563", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.258, 0.505, 0.294] in the image\nAnd my action is Action: TYPE\nValue: New Orleans"}]}, {"id": "mind2web_3564", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK\n[link] Number of Seats (High to Low) -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.334, 0.93, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3565", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.067, 0.326, 0.108] in the image\nAnd my action is Action: TYPE\nValue: music"}]}, {"id": "mind2web_3566", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.304, 0.513, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3567", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.257, 0.637, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3568", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.565, 0.014, 0.579, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3569", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.217, 0.574, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3570", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.046, 0.79, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3571", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.214, 0.4, 0.236] in the image\nAnd my action is Action: SELECT\nValue: Highest Price"}]}, {"id": "mind2web_3572", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... -> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK\n[div] Relevance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.077, 0.98, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3573", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. 
-> CLICK\n[link] Gallery View -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.114, 0.775, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3574", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Red Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.095, 0.45, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3575", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_0f472545-bc7d-45e0-8614-8bda0386ae6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.324, 0.549, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3576", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Air-conditioned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.087, 0.754, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3577", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.377, 0.252, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3578", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.468, 2.937, 0.536, 2.949] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3579", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.385, 0.385, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3580", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.333, 0.515, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3581", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\n[link] Shopping Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.339, 0.523, 0.358] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_3582", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[div] Size -> CLICK\n[link] 9K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.507, 0.495, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3583", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.597, 0.266, 0.641] in the image\nAnd my action is Action: SELECT\nValue: Honda"}]}, {"id": "mind2web_3584", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com\n[combobox] Organization Type -> SELECT: Family Trip"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.272, 0.777, 0.306] in the image\nAnd my action is Action: TYPE\nValue: Johnson"}]}, {"id": "mind2web_3585", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.308, 0.144, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3586", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: buenos aires"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.329, 0.617, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3587", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.493, 0.652, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3588", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.266, 0.42, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3589", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.383, 0.797, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3590", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.141, 0.652, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3591", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.014, 0.392, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3592", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3593", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 1.305, 0.176, 1.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3594", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.155, 0.695, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3595", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.268, 0.469, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3596", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.159, 0.215, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3597", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.505, 0.486, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3598", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.028, 0.535, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3599", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.015, 0.592, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3600", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 2.481, 0.405, 2.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3601", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.159, 0.554, 0.198] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_3602", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.543, 0.092, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3603", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 0.423, 0.55, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3604", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.801, 0.11, 0.886, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3605", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.311, 0.312, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3606", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.159, 0.239, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3607", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_308e735f-a080-469b-879b-4c99508ede29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... 
-> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.43, 0.685, 0.457] in the image\nAnd my action is Action: TYPE\nValue: 133 st avenue"}]}, {"id": "mind2web_3608", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 1.344, 0.268, 1.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3609", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 1.109, 0.375, 1.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3610", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK\n[button] Go! -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.326, 0.154, 0.376, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3611", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill\n[button] Search -> CLICK\n[link] People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.187, 0.426, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3612", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.179, 0.815, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3613", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... -> CLICK\n[span] 31 -> CLICK\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.15, 0.223, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3614", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 2.068, 0.338, 2.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3615", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK\n[span] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.375, 0.686, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3616", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK\n[heading] Electric Mile Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 1.204, 0.277, 1.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3617", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.092, 0.129, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3618", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.042, 0.309, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3619", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.039, 0.311, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3620", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Search -> CLICK\n[button] 2 adults \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.255, 0.331, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3621", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK\n[button] SEARCH BY JOB TITLE OR KEYWORD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.214, 0.571, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3622", "image": {"bytes": "", "path": "./images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flight #039028\nPrevious actions:\n[input] -> TYPE: 039028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.03, 0.603, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3623", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.37, 0.894, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3624", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_a3d22f91-736a-458a-b567-be47670899fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.27, 0.191, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 3 00 PM"}]}, {"id": "mind2web_3625", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.173, 0.975, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3626", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[use] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 1.099, 0.716, 1.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3627", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.244, 0.663, 0.264] in the image\nAnd my action is Action: TYPE\nValue: brooklyn"}]}, {"id": "mind2web_3628", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: London\n[div] London Heathrow Airport (LHR) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.15, 0.923, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3629", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.137, 0.719, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3630", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.059, 0.366, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3631", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.057, 0.342, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3632", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.099, 0.727, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3633", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[button] Back -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.986, 0.193, 0.999] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3634", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.201, 0.387, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3635", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK\n[button] Altavista, VA \ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.163, 0.269, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3636", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.417, 0.622, 0.449] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_3637", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.213, 0.393, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3638", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK\n[link] Popular tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.312, 0.457, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3639", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.13, 0.953, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3640", "image": {"bytes": "", "path": "./images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out what payment methods are available for monthly parking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.008, 0.431, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3641", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.244, 0.037, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3642", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.422, 0.441, 0.452] in the image\nAnd my action is Action: TYPE\nValue: 250"}]}, {"id": "mind2web_3643", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.455, 0.471, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3644", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.266, 0.278, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3645", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.391, 0.468, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3646", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.139, 0.35, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3647", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches\n[button] Go -> CLICK\n[RootWebArea] Amazon.com : dog bed 30 inches -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.122, 0.394, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3648", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.234, 0.211, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3649", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.758, 0.122, 0.775] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3650", "image": {"bytes": "", "path": "./images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out what payment methods are available for monthly parking.\nPrevious actions:\n[link] SUPPORT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.791, 0.331, 0.812] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3651", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_949553fb-552c-49ed-9735-48391fd1e11a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.045, 0.378, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3652", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.681, 0.122, 0.699] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3653", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 1.035, 0.632, 1.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3654", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.004, 0.461, 0.01] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3655", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.348, 0.28, 0.385] in the image\nAnd my action is Action: SELECT\nValue: UNITED STATES"}]}, {"id": "mind2web_3656", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.332, 0.514, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3657", "image": {"bytes": "", "path": "./images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse events happening at Madison Square Garden.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Madison Square Garden"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.292, 0.748, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3658", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_3659", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_da04e845-1f80-4464-80df-2a89df6c5d9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.363, 0.656, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3660", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK\n[label] Free Cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.154, 0.963, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3661", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.072, 0.137, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3662", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.248, 0.787, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3663", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK\n[textbox] Please enter City, State, or Zip Code -> TYPE: SPRING, TX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.143, 0.517, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3664", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM\n[button] Select Pick-up Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.473, 0.255, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3665", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine\n[button] Search for cough medicine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.609, 0.143, 0.621] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3666", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.355, 0.292, 0.401] in the image\nAnd my action is Action: SELECT\nValue: Events"}]}, {"id": "mind2web_3667", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Price -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.011, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3668", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK\n[button] To make this website accessible to screen reader, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.481, 0.305, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3669", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 0.014, 0.422, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3670", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.487, 0.873, 0.504] in the image\nAnd my action is Action: TYPE\nValue: Clara"}]}, {"id": "mind2web_3671", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.069, 0.785, 0.086] in the image\nAnd my action is Action: TYPE\nValue: barclays center"}]}, {"id": "mind2web_3672", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.021, 0.74, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3673", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 1.407, 0.431, 1.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3674", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.571, 0.927, 0.601] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3675", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.151, 0.957, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3676", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... 
-> CLICK\n[link] 18 March 2023, Saturday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.104, 0.562, 0.14] in the image\nAnd my action is Action: TYPE\nValue: 2819"}]}, {"id": "mind2web_3677", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.511, 0.397, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3678", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 1.652, 0.969, 1.661] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3679", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] 10+ Night Trips -> CLICK\n[button] Guided Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.474, 0.772, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3680", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 1.541, 0.431, 1.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3681", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.289, 0.079, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3682", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.421, 0.323, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3683", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. Kennedy International Airport (JFK) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.054, 0.233, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3684", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.177, 0.554, 0.199] in the image\nAnd my action is Action: TYPE\nValue: was"}]}, {"id": "mind2web_3685", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.117, 0.279, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3686", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.046, 0.785, 0.057] in the image\nAnd my action is Action: TYPE\nValue: madison square garden"}]}, {"id": "mind2web_3687", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_15c9b3ca-89b6-401b-9d4c-beb382884a11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.594, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3688", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.246, 0.309, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3689", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK\n[span] Show -> CLICK\n[span] 16GB -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 2.329, 0.192, 2.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3690", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.239, 0.858, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3691", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.159, 0.378, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3692", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz\n[input] -> TYPE: debbi.wo@bbt.com\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 1.145, 0.953, 1.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3693", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.372, 0.71, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3694", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.141, 0.63, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3695", "image": {"bytes": "", "path": "./images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find career openings in the marketing department\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 1.453, 0.255, 1.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3696", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.126, 0.08, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3697", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.713, 0.027, 1.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3698", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.419, 0.57, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3699", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107\n[div] 49107 - Buchanan, MI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.379, 0.475, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3700", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.509, 0.147, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3701", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.335, 0.146, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3702", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.014, 0.799, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3703", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK\n[link] All Vehicles (13) -> CLICK\n[span] SUVs & Wagons (5) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.304, 0.918, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3704", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK\n[checkbox] list-filter-item-label-1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.685, 0.089, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3705", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK\n[p] Backup Camera -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.19, 0.429, 0.211] in the image\nAnd my action is Action: SELECT\nValue: Newest first (by car year)"}]}, {"id": "mind2web_3706", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.371, 0.586, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3707", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.264, 0.941, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3708", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.265, 0.652, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3709", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Kayaks\n[option] kayaks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.633, 0.164, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3710", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.802, 0.916, 0.818] in the image\nAnd my action is Action: TYPE\nValue: denise.bloom@bbt.com"}]}, {"id": "mind2web_3711", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.246, 0.086, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3712", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.774, 0.582, 0.901, 0.595] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3713", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\n[heading] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.37, 0.272, 0.407] in the image\nAnd my action is Action: TYPE\nValue: 1234"}]}, {"id": "mind2web_3714", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_aa7a9996-a657-4023-97a9-d9baa3b5462c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK\n[spinbutton] Gift Card Number -> TYPE: 1000000000000000\n[spinbutton] Gift Card Pin -> TYPE: 1222"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.319, 0.5, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3715", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.254, 0.146, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3716", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK\n[link] Warner Theatre Theaters -> CLICK\n[link] SEE OPTIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.302, 0.336, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3717", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_7648c204-6107-469b-915d-6b24608d0e96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.095, 0.452, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3718", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.076, 0.325, 0.087] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_3719", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.249, 0.82, 0.288] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_3720", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.178, 0.375, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3721", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[span] Phone Repair -> CLICK\n[textbox] Near -> TYPE: houston\n[span] Houston -> CLICK\n[button] Fast-responding -> CLICK\n[radio] Data recovery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.054, 0.63, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3722", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK\n[link] Last 90 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.092, 0.968, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3723", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.388, 0.445, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3724", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.359, 0.567, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3725", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.022, 0.39, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3726", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_694f27c3-ec85-4bb5-a08c-7650fcbbbaf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM\n[combobox] End Time -> SELECT: 6:00 PM\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.419, 0.397, 0.432] in the image\nAnd my action is Action: SELECT\nValue: Sort by Distance"}]}, {"id": "mind2web_3727", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[button] Search -> CLICK\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[div] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.038, 0.754, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3728", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.258, 0.335, 0.28] in the image\nAnd my action is Action: TYPE\nValue: NORTH PLYMOUTH"}]}, {"id": "mind2web_3729", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\n[span] Products -> CLICK\n[span] Premium Subscriptions -> CLICK\n[button] Compare Subscriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.908, 0.583, 0.932] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3730", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.295, 0.43, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3731", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.144, 0.712, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3732", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. 
-> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.35, 0.693, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3733", "image": {"bytes": "", "path": "./images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guide for paying with commuter benefits.\nPrevious actions:\n[p] About -> HOVER\n[link] SpotHero for Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.391, 0.737, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3734", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.774, 0.192, 0.786] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3735", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[img] Chris Rock: Selective Outrage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.544, 0.5, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3736", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.248, 0.702, 0.262] in the image\nAnd my action is Action: TYPE\nValue: Daytona"}]}, {"id": "mind2web_3737", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.062, 0.434, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3738", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_9b8e4f5e-44c2-4d7b-822a-2f50757cdf0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3739", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.031, 0.227, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3740", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.488, 0.44, 0.499, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3741", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.039, 0.475, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3742", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James\n[textbox] Message Line 1 -> TYPE: Happy Christmas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.685, 0.714, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3743", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.006, 0.553, 0.012] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3744", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Ann Arbor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.393, 0.406, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3745", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[button] 10 -> CLICK\n[button] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.312, 0.879, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3746", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.248, 0.9, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3747", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[button] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.114, 0.65, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3748", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.403, 0.494, 0.428] in the image\nAnd my action is Action: TYPE\nValue: Grand Junction"}]}, {"id": "mind2web_3749", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.209, 0.375, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3750", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.13, 0.668, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3751", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK\n[combobox] Language -> SELECT: \ud83c\uddec\ud83c\udde7 English (UK)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.047, 0.565, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3752", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.019, 0.981, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3753", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.241, 0.828, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3754", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK\n[button] View details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.857, 0.952, 0.888] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3755", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.46, 0.846, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3756", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.217, 0.284, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3757", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.084, 0.705, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3758", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.235, 0.591, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3759", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_363d6ca6-36b4-40cd-8116-3c77b4246f5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.243, 0.919, 0.261] in the image\nAnd my action is Action: TYPE\nValue: 7528 East Mechanic Ave. Fargo, ND 58102"}]}, {"id": "mind2web_3760", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.545, 0.871, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3761", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.09, 0.318, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3762", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK\n[span] Watchlist -> CLICK\n[span] Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.332, 0.788, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3763", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.315, 0.505, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3764", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.481, 0.389, 0.497] in the image\nAnd my action is Action: SELECT\nValue: 8"}]}, {"id": "mind2web_3765", "image": {"bytes": "", "path": "./images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give a like to the #1 track of the Real Time Top Chart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.006, 0.838, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3766", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.31, 0.238, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3767", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.244, 0.286, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3768", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.232, 0.238, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3769", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[tab] Fastest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.425, 0.916, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3770", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[gridcell] Thu Jun 01 2023 -> CLICK\n[gridcell] Fri Jun 30 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.482, 0.125, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3771", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.165, 0.199, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3772", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.018, 0.131, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3773", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.181, 0.318, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3774", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.62, 0.194, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3775", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK\n[link] Any length -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.323, 0.212, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3776", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.333, 0.553, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3777", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.408, 0.583, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3778", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.197, 0.792, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3779", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK\n[label] Dome -> CLICK\n[link] Bass Pro Shops Eclipse 2-Person 5x7 Dome Tent -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.51, 0.976, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3780", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.19, 0.359, 0.208] in the image\nAnd my action is Action: TYPE\nValue: Oyster Bay"}]}, {"id": "mind2web_3781", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.11, 0.592, 0.127] in the image\nAnd my action is Action: TYPE\nValue: 66 perry st"}]}, {"id": "mind2web_3782", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.354, 0.066, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3783", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.332, 0.514, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3784", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.013, 0.481, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3785", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.309, 0.72, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3786", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.198, 0.223, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3787", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: manchester"}]}, {"id": "mind2web_3788", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_56c6ec61-144d-4320-836a-4aaa0573ed66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.264, 0.959, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3789", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.436, 0.975, 0.467] in the image\nAnd my action is Action: TYPE\nValue: scisoorbros@gmail.com"}]}, {"id": "mind2web_3790", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.35, 0.359, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3791", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.43, 0.588, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3792", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.225, 0.473, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3793", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.613, 0.568, 0.626] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3794", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.158, 0.534, 0.174] in the image\nAnd my action is Action: TYPE\nValue: Cahill"}]}, {"id": "mind2web_3795", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.217, 0.309, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3796", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.388, 0.66, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3797", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.227, 0.605, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3798", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.552, 0.106, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3799", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.228, 0.057, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3800", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James\n[textbox] Message: 200 characters remaining -> TYPE: Congrats on your new home.\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.246, 0.992, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3801", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3802", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK\n[link] Financial Info -> CLICK\n[heading] Balance Sheet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.7, 0.384, 0.742] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3803", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK\n[group] \uf067 Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.421, 0.076, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3804", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.075, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3805", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.826, 0.846, 0.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3806", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.104, 0.562, 0.141] in the image\nAnd my action is Action: TYPE\nValue: 3329456534543"}]}, {"id": "mind2web_3807", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.849, 0.916, 0.865] in the image\nAnd my action is Action: TYPE\nValue: debbi.wo@bbt.com"}]}, {"id": "mind2web_3808", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK\n[div] Find a Store -> CLICK\n[link] FIND A STORE -> CLICK\n[textbox] SEARCH BY CITY, STATE, OR ZIP CODE -> TYPE: 43240"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.212, 0.991, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3809", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.943, 0.192, 0.956] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3810", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3811", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.44, 0.207, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3812", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888\n[combobox] Market: * -> SELECT: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.82, 0.787, 0.846] in the image\nAnd my action is Action: TYPE\nValue: 123rd st"}]}, {"id": "mind2web_3813", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.407, 0.72, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3814", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.208, 0.692, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3815", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.439, 0.393, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3816", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.456, 0.478, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3817", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_48bf6e5e-9a49-4d59-a377-26dcf4a830f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.438, 0.448, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3818", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK\n[link] Warner Theatre Theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.319, 0.486, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3819", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.124, 0.393, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3820", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.335, 0.969, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3821", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK\n[span] Select a store -> CLICK\n[span] IKEA West Chester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.011, 0.988, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3822", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.21, 0.406, 0.254] in the image\nAnd my action is Action: TYPE\nValue: beauty salons"}]}, {"id": "mind2web_3823", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.187, 0.352, 0.368, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3824", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.36, 0.491, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3825", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.46, 0.588, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3826", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.309, 0.648, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3827", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK\n[combobox] 2 tickets for Thursday, May 4th | American Express... 
-> SELECT: 3 Tickets"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.367, 0.162, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3828", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_ffe3ba82-c9d1-4d63-a501-1525cbd12380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.467, 0.241, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3829", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER\n[span] Dry Cleaning -> CLICK\n[textbox] Near -> TYPE: new york city\n[span] New York, NY -> CLICK\n[button] Virtual Consultations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.095, 0.63, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3830", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.097, 0.63, 0.101] in the image\nAnd my action is Action: SELECT\nValue: AMC Grove City 14"}]}, {"id": "mind2web_3831", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.076, 0.325, 0.087] in the image\nAnd my action is Action: TYPE\nValue: Carribbean"}]}, {"id": "mind2web_3832", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.147, 0.657, 0.174] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_3833", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.411, 0.153, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3834", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.128, 0.668, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3835", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.443, 0.984, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3836", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.248, 0.362, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3837", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK\n[button] Follow -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.485, 0.489, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3838", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.192, 0.235, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3839", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.093, 0.266, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3840", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.086, 0.735, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3841", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.165, 0.39, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3842", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.26, 0.141, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3843", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.03, 0.817, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3844", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.012, 0.804, 0.03] in the image\nAnd my action is Action: TYPE\nValue: zyrtec"}]}, {"id": "mind2web_3845", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.057, 0.784, 0.073] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_3846", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK\n[button] Filter by rent -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.205, 0.141, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3847", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] European trains -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.185, 0.966, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3848", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.245, 0.359, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3849", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.39, 0.377, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3850", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.207, 0.786, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3851", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.278, 0.255, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3852", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.443, 0.375, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3853", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.844, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3854", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.219, 0.473, 0.248] in the image\nAnd my action is Action: TYPE\nValue: 50000"}]}, {"id": "mind2web_3855", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_89a10228-542e-43ea-be51-914770f17ff5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. -> TYPE: washington\n[a] WAS - Washington, DC -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.219, 0.843, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3856", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. 
-> CLICK\n[link] Navigate to at-the kiosk -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.175, 0.085, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3857", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.056, 0.58, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3858", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: MILAN"}]}, {"id": "mind2web_3859", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.062, 0.788, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3860", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.523, 0.648, 0.55] in the image\nAnd my action is Action: TYPE\nValue: 7"}]}, {"id": "mind2web_3861", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.639, 0.522, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3862", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.558, 0.491, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3863", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.459, 0.404, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3864", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.185, 0.941, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3865", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.006, 0.873, 0.026] in the image\nAnd my action is Action: TYPE\nValue: blazer"}]}, {"id": "mind2web_3866", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.74, 0.557, 0.77] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_3867", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.086, 0.266, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3868", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.844, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3869", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK\n[DisclosureTriangle] All Dates -> CLICK\n[li] This month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.095, 0.553, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3870", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.048, 0.366, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3871", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.308, 0.582, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3872", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.372, 0.059, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3873", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.039, 0.499, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3874", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.969, 0.284, 0.994] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3875", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.213, 0.623, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3876", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_8db68fdb-b4d0-4883-ba65-f2ecbb8ac59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.36, 0.384, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3877", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 1.758, 0.226, 1.768] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3878", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\n[tab] Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.079, 0.784, 0.101] in the image\nAnd my action is Action: TYPE\nValue: Dallas Love Field"}]}, {"id": "mind2web_3879", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.193, 0.88, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3880", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK\n[link] BEDROOM -> CLICK\n[link] Mattress & Mattress Toppers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.096, 0.22, 0.168, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3881", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 1 weekly charts ranked artist\nPrevious actions:\n[link] Charts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.221, 0.145, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3882", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.458, 0.286, 0.482] in the image\nAnd my action is Action: SELECT\nValue: Good"}]}, {"id": "mind2web_3883", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.656, 0.944, 0.68] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3884", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.056, 0.615, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3885", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.371, 0.637, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3886", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.143, 0.691, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3887", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.049, 0.482, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3888", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.361, 0.476, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3889", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.157, 0.273, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3890", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.179, 0.923, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3891", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.056, 0.491, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3892", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Star Wars\n[button] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.542, 0.653, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3893", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.105, 0.188, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3894", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.319, 0.469, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3895", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK\n[button] Jul 7, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.26, 0.568, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3896", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.173, 0.628, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3897", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.299, 0.457, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3898", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_05ba30e9-0691-4d0c-9307-8af2746cc476.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3899", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.696, 0.059, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3900", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.431, 0.648, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3901", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.121, 0.772, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3902", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.223, 0.759, 0.255] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_3903", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM\n[button] Select Pick-up Location -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.101, 0.928, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3904", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.005, 0.789, 0.019] in the image\nAnd my action is Action: TYPE\nValue: hawaii"}]}, {"id": "mind2web_3905", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.204, 0.782, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3906", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.248, 0.631, 0.282] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_3907", "image": {"bytes": "", "path": "./images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Display details of new lanched iPad pro 11-inch\nPrevious actions:\n[link] iPad -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.1, 0.343, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3908", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.396, 0.828, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3909", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.354, 0.532, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3910", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK\n[generic] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.147, 0.953, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3911", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.486, 0.943, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3912", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.04] in the image\nAnd my action is Action: TYPE\nValue: m s subbulakshmi"}]}, {"id": "mind2web_3913", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_e6d16748-7363-4ea8-88d5-d84d200ed602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK\n[textbox] Order number -> TYPE: 24124124091"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.389, 0.872, 0.441] in the image\nAnd my action is Action: TYPE\nValue: boobear@gmail.com"}]}, {"id": "mind2web_3914", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.612, 0.922, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3915", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.615, 0.93, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3916", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.44, 0.089, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3917", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.047, 0.347, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3918", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.237, 1.0, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3919", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d6cb8acb-5af8-4ed1-9b71-eb36c19e63dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK\n[link] 2-10 min -> CLICK\n[link] To listen to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.473, 0.212, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3920", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.015, 0.189, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3921", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.158, 0.887, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3922", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.105, 0.266, 1.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3923", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Cleveland -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.072, 0.921, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3924", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.355, 0.74, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3925", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK\n[checkbox] Seat choice included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.553, 0.048, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3926", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK\n[textbox] Search Kroger... -> TYPE: bananas\n[span] bananas -> CLICK\n[span] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.183, 0.963, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3927", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_72aebfb6-ea05-4023-b171-cd398ebf61b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK\n[link] Fastbreak Program -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.169, 0.19, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3928", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.563, 0.32, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3929", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.029, 0.28, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3930", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.409, 0.939, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3931", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.428, 0.916, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3932", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.139, 0.52, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3933", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK\n[span] Single Pack -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.152, 0.4, 0.167] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_3934", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.204, 0.691, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3935", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_7ea26b28-5c62-473d-b458-360617fef404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK\n[span] From $62 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.176, 0.136, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3936", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.202, 0.532, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3937", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK\n[checkbox] 30 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.157, 0.923, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3938", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.372, 0.209, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3939", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.698, 0.512, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3940", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.413, 0.456, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3941", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.012, 0.384, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3942", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.704, 0.285, 0.736] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3943", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.175, 0.465, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3944", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.153, 0.094, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3945", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.276, 0.859, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3946", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.285, 0.603, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3947", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK\n[link] Athletic Shoes & Sneakers Athletic Shoes & Sneaker... -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.62, 0.974, 0.641] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3948", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\n[link] Visit the Food & Drinks page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.121, 0.488, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3949", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.736, 0.715, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3950", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... 
-> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.364, 0.36, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3951", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.329, 0.686, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3952", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.352, 0.448, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3953", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.39, 0.72, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3954", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.755, 0.537, 0.927, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3955", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] FIND BY STATE/PROVINCE \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.559, 0.154, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3956", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK\n[button] $56,000 + -> CLICK\n[menuitem] $30,000 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.281, 0.341, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3957", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.077, 0.215, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3958", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. -> TYPE: 04/22/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.451, 0.29, 0.488] in the image\nAnd my action is Action: SELECT\nValue: 1"}]}, {"id": "mind2web_3959", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[textbox] Near -> TYPE: SAN FRANCISCO\n[span] San Francisco -> CLICK\n[checkbox] Cocktail Bars -> CLICK\n[checkbox] Outdoor Seating -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.524, 0.223, 0.617, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3960", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.697, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3961", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.704, 0.284, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3962", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_a091ad8f-b00e-4d24-838b-439f2c89e0c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.532, 0.306, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3963", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.303, 0.553, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3964", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_9f67bc8f-0bad-45c4-b2f4-b3ebbbe9aef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM\n[combobox] End Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.322, 0.3, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3965", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.225, 0.196, 0.246] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_3966", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.318, 0.375, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3967", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.353, 0.932, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3968", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.373, 0.233, 0.395] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_3969", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.418, 0.12, 0.462] in the image\nAnd my action is Action: SELECT\nValue: 2012"}]}, {"id": "mind2web_3970", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\n[button] Browse -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.036, 0.231, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3971", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.066, 0.914, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3972", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.909, 0.938, 0.956] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3973", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Most Popular TV Shows -> CLICK\n[link] Horror -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.183, 0.601, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3974", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[svg] -> CLICK\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK\n[div] #virtual -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.438, 0.478, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3975", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.264, 0.341, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3976", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Phuket"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.247, 0.795, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3977", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.4, 0.32, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3978", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Deals -> CLICK\n[heading] Spring Break Savings & Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.289, 0.945, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3979", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.127, 0.575, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3980", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.539, 0.473, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3981", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.163, 0.432, 0.202] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_3982", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.078, 0.266, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3983", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.077, 0.777, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3984", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.383, 0.277, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3985", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.005, 0.887, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3986", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_baed0e46-faa0-4f15-aa46-f27dd88f6e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.279, 0.384, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3987", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.935, 0.137, 0.977, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3988", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.069, 0.327, 0.091] in the image\nAnd my action is Action: TYPE\nValue: BRISTOL"}]}, {"id": "mind2web_3989", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_13a2c4e4-2eed-443e-9c1b-f9158831bce5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Employer Name -> CLICK\n[textbox] Employer Name -> TYPE: Gua AB"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.883, 0.73, 0.934, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3990", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK\n[button] Search -> CLICK\n[div] $141 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.643, 0.395, 0.728, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3991", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.026, 0.284, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3992", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.021] in the image\nAnd my action is Action: TYPE\nValue: dog bed 30 inches"}]}, {"id": "mind2web_3993", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3994", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.091, 0.157, 0.1] in the image\nAnd my action is Action: TYPE\nValue: jakarta"}]}, {"id": "mind2web_3995", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.141, 0.782, 0.168] in the image\nAnd my action is Action: TYPE\nValue: professional boxing"}]}, {"id": "mind2web_3996", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4f9882ac-080d-4657-a4ff-47696c6a4b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.433, 0.451, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3997", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.121, 0.087, 1.132] in the image\nAnd my action is Action: TYPE\nValue: 90026"}]}, {"id": "mind2web_3998", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.605, 0.95, 0.639] in the image\nAnd my action is Action: TYPE\nValue: the home of joe bloggs"}]}, {"id": "mind2web_3999", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4000", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.169, 0.375, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4001", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Series -> CLICK\n[listitem] 1-SERIES (8) 1-SERIES (8) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.403, 0.253, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4002", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_9ce52b41-222b-432a-93fc-3a3050e800b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.376, 0.228, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4003", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[link] Choose Another Hotel -> CLICK\n[button] Choose your room -> CLICK\n[button] Book Double Bed - Standard Room -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.896, 0.153, 0.977, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4004", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.435, 0.591, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4005", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 1.07, 0.541, 1.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4006", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.245, 0.359, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4007", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.265, 0.156, 0.283] in the image\nAnd my action is Action: TYPE\nValue: The Last of Us"}]}, {"id": "mind2web_4008", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.315, 0.71, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4009", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.029, 0.28, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4010", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.276, 0.383, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4011", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.317, 0.777, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4012", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[div] Size -> CLICK\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.804, 0.233, 0.814] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4013", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.258, 0.562, 0.279] in the image\nAnd my action is Action: TYPE\nValue: new orleans"}]}, {"id": "mind2web_4014", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.327, 0.246, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4015", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.087, 0.93, 0.103] in the image\nAnd my action is Action: TYPE\nValue: Fallout 4"}]}, {"id": "mind2web_4016", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.006, 0.651, 0.027] in the image\nAnd my action is Action: TYPE\nValue: roman empire history"}]}, {"id": "mind2web_4017", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.089, 0.478, 0.111] in the image\nAnd my action is Action: TYPE\nValue: Le maraise"}]}, {"id": "mind2web_4018", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 11231\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.293, 0.981, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4019", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.231, 0.816, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4020", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.042, 0.652, 0.075] in the image\nAnd my action is Action: TYPE\nValue: Nintendo Switch Console"}]}, {"id": "mind2web_4021", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\n[link] Massively Multiplayer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.451, 0.497, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4022", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK\n[label] All Regions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.486, 0.269, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4023", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK\n[span] 2 -> CLICK\n[span] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.203, 0.781, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4024", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK\n[link] STATS -> CLICK\n[link] CAREER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.216, 0.655, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4025", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[link] Show 684 stays -> CLICK\n[path] -> CLICK\n[textbox] Name -> TYPE: Togo\n[button] Create -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.455, 0.709, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4026", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.349, 0.595, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4027", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.737, 0.166, 0.774] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4028", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.427, 0.31, 0.443] in the image\nAnd my action is Action: TYPE\nValue: 222900"}]}, {"id": "mind2web_4029", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.036, 0.343, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4030", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\n[combobox] Search -> TYPE: frosthaven"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.05, 0.986, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4031", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK\n[textbox] Enter zip code or location. Please enter a valid l... -> TYPE: 90028\n[img] -> CLICK\n[generic] 6201 Hollywood Blvd., Suite 126 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.285, 0.87, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4032", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.009, 0.384, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4033", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.18, 0.423, 0.209, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4034", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4035", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.562, 0.07, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4036", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[img] -> CLICK\n[svg] -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.867, 0.988, 0.918] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4037", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 1.102, 0.372, 1.126] in the image\nAnd my action is Action: TYPE\nValue: 10013"}]}, {"id": "mind2web_4038", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.657, 0.211, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4039", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.421, 0.234, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4040", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: high tide\n[heading] High Tide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.276, 0.388, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4041", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.018, 0.598, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4042", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.999, 0.796, 1.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4043", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Leaving from -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.203, 0.362, 0.256] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_4044", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK\n[radio] $100 to $200 -> CLICK\n[radio] New York (and vicinity) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.598, 0.123, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4045", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.076, 0.412, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4046", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.264, 0.348, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4047", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.086, 0.327, 0.114] in the image\nAnd my action is Action: TYPE\nValue: edinburg"}]}, {"id": "mind2web_4048", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.986, 0.032, 0.996] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4049", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.147, 1.0, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4050", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.504, 0.233, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4051", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.325, 0.123, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4052", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.007, 0.441, 0.016] in the image\nAnd my action is Action: TYPE\nValue: diner"}]}, {"id": "mind2web_4053", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.487, 0.237, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4054", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.345, 0.086, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4055", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.649, 0.307, 0.671] in the image\nAnd my action is Action: TYPE\nValue: fre"}]}, {"id": "mind2web_4056", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.087, 0.93, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4057", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.199, 0.603, 0.225] in the image\nAnd my action is Action: TYPE\nValue: india"}]}, {"id": "mind2web_4058", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.046, 0.67, 0.057] in the image\nAnd my action is Action: TYPE\nValue: Smithsonian"}]}, {"id": "mind2web_4059", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\n[textbox] Where to? -> TYPE: cancun\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.045, 0.498, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4060", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.037, 0.646, 0.05] in the image\nAnd my action is Action: TYPE\nValue: resident evil"}]}, {"id": "mind2web_4061", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.055, 0.594, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4062", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: heathrow\n[button] London (LHR - Heathrow) United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.26, 0.568, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4063", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK\n[button] Saturday, April 15, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.41, 0.484, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4064", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.05, 0.556, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4065", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.321, 0.336, 0.347] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_4066", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: the weeknd\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.888, 0.224, 0.957, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4067", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.021] in the image\nAnd my action is Action: TYPE\nValue: mens black running shoes"}]}, {"id": "mind2web_4068", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.193, 0.677, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4069", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK\n[textbox] Search Amazon -> TYPE: laundry detergent\n[button] Go -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.023, 0.917, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4070", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.025, 0.424, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4071", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 1.52, 0.867, 1.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4072", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.125, 0.411, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4073", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.338, 0.638, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4074", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.094, 0.579, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4075", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.469, 0.959, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4076", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK\n[link] Under $75.00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.285, 0.412, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4077", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.243, 0.964, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4078", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.042, 0.309, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4079", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_68c92172-9b4e-41fd-866d-129ce1846de4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.183, 0.592, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4080", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.104, 0.707, 0.141] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_4081", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10018\n[button] Find care -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.449, 0.448, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4082", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.473, 0.83, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4083", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.285, 0.844, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4084", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK\n[link] Teams \ue00d -> CLICK\n[select] UEFA Champions League -> SELECT: Spanish LaLiga"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.36, 0.131, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4085", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.314, 0.359, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4086", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... -> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe\n[textbox] last-name -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.465, 0.434, 0.486] in the image\nAnd my action is Action: TYPE\nValue: 101010"}]}, {"id": "mind2web_4087", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.023, 0.821, 0.038] in the image\nAnd my action is Action: TYPE\nValue: winter coat"}]}, {"id": "mind2web_4088", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.013, 0.601, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4089", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.383, 0.255, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4090", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.711, 0.495, 0.729] in the image\nAnd my action is Action: TYPE\nValue: johndoew@gmail.com"}]}, {"id": "mind2web_4091", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.452, 0.826, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4092", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_8734306f-5a8a-4671-a560-5850fbb319a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK\n[link] get tickets for Creed III -> CLICK\n[select] AMC Columbus 10 -> SELECT: Change Location..."}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.092, 0.711, 0.121] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_4093", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.332, 0.271, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4094", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.297, 0.237, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4095", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[option] Price: Low to high -> CLICK\n[div] See more chips. -> CLICK\n[img] Short Socks (2 Pairs) -> CLICK\n[svg] -> CLICK\n[button] Age18M-3Y(12-15cm) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.451, 0.642, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4096", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.091, 0.294, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4097", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK\n[button] Apply promo code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.096, 0.369, 0.126] in the image\nAnd my action is Action: TYPE\nValue: 1000001"}]}, {"id": "mind2web_4098", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. 
-> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.425, 0.783, 0.452] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_4099", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK\n[button] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.355, 0.855, 0.401] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_4100", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.025, 0.464, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4101", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.206, 0.192, 0.241] in the image\nAnd my action is Action: SELECT\nValue: 3 Guests"}]}, {"id": "mind2web_4102", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.003, 0.549, 0.013] in the image\nAnd my action is Action: TYPE\nValue: laundry detergent"}]}, {"id": "mind2web_4103", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? 
-> TYPE: SHANGHAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.541, 0.661, 0.588] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4104", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[svg] -> CLICK\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.195, 0.906, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4105", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.358, 0.309, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4106", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.243, 0.312, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4107", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK\n[Date] FROM -> TYPE: 04/01/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.224, 0.238, 0.287, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4108", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.279, 0.514, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4109", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: washington\n[option] Washington District of Columbia,\u00a0United States -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.361, 0.463, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4110", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? 
-> TYPE: India\n[button] India Asia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.304, 0.777, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4111", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.03, 0.987, 0.057] in the image\nAnd my action is Action: TYPE\nValue: Matthews winery"}]}, {"id": "mind2web_4112", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.232, 0.285, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4113", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.3, 0.37, 0.338] in the image\nAnd my action is Action: TYPE\nValue: SHELDON"}]}, {"id": "mind2web_4114", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK\n[combobox] Language -> SELECT: \ud83c\uddec\ud83c\udde7 English (UK)\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.16, 0.639, 0.184] in the image\nAnd my action is Action: SELECT\nValue: Euro (EUR/\u20ac)"}]}, {"id": "mind2web_4115", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.188, 0.5, 0.216] in the image\nAnd my action is Action: TYPE\nValue: Harrt Reid Intl Airport, LAS"}]}, {"id": "mind2web_4116", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.178, 0.215, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4117", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: San Francisco\n[div] CA, USA -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.28, 0.273, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4118", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.711, 0.266, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4119", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\n[button] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.088, 0.183, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4120", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.66, 0.923, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4121", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.069, 0.798, 0.093] in the image\nAnd my action is Action: TYPE\nValue: black sleeping bag"}]}, {"id": "mind2web_4122", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. 
-> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.4, 0.634, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4123", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.444, 0.81, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4124", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.336, 0.212, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4125", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.204, 0.476, 0.222] in the image\nAnd my action is Action: TYPE\nValue: Lue"}]}, {"id": "mind2web_4126", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER\n[link] Gaming Desktops -> CLICK\n[span] NVIDIA GeForce RTX 4000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.203, 0.158, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4127", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.212, 0.041, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4128", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.163, 0.562, 0.169] in the image\nAnd my action is Action: SELECT\nValue: 7 Guests"}]}, {"id": "mind2web_4129", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.065, 0.32, 0.074] in the image\nAnd my action is Action: TYPE\nValue: NIAGRA FALLS"}]}, {"id": "mind2web_4130", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 1.226, 0.408, 1.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4131", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Lansing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.179, 0.52, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4132", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK\n[link] View More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 1.593, 0.063, 1.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4133", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.015] in the image\nAnd my action is Action: TYPE\nValue: Taylor Swift"}]}, {"id": "mind2web_4134", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_905e2ca6-5659-4d6c-be5d-940a41712c87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK\n[button] View details -> CLICK\n[link] Select package -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.327, 0.965, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4135", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.03, 0.817, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4136", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.428, 0.868, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4137", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_8a4a6571-1410-440e-a5f1-1ed1c39160a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.348, 0.28, 0.385] in the image\nAnd my action is Action: SELECT\nValue: AUSTRALIA"}]}, {"id": "mind2web_4138", "image": {"bytes": "", "path": "./images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up the scores for the previous day's NBA games\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.058, 0.178, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4139", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5746bc15-9d5b-484d-8f38-d15bdcbed1ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK\n[checkbox] Self Park (1) -> CLICK\n[button] Show 1 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.548, 0.372, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4140", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: NIAGRA FALLS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.124, 0.369, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4141", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK\n[link] View More -> CLICK\n[checkbox] RPG RPG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.025, 0.378, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4142", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors\n[textarea] -> TYPE: To Watch\n[combobox] Type of List * -> SELECT: People"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.609, 0.198, 0.64] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4143", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.362, 0.866, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4144", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.811, 0.447, 0.84] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4145", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.236, 0.372, 0.249] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_4146", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.236, 0.344, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4147", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.597, 0.658, 0.623] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4148", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER\n[link] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.047, 0.669, 0.081] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_4149", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4150", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.544, 0.111, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4151", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.282, 0.844, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4152", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.391, 0.271, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4153", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.262, 0.095, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4154", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.16, 0.931, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4155", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.271, 0.485, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4156", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.044, 0.931, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4157", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.324, 0.97, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4158", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a72834c-741f-4059-8cb2-0a6769c33a32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.302, 0.234, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4159", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.553, 0.411, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4160", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.107, 0.942, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4161", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.192, 0.293, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4162", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.008, 0.156, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4163", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.577, 0.796, 0.637] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4164", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.059, 0.931, 0.094] in the image\nAnd my action is Action: TYPE\nValue: Real Madrid"}]}, {"id": "mind2web_4165", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.18, 0.367, 0.467, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4166", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.24, 0.491, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4167", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.127, 0.084, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4168", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.314, 0.666, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4169", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.271, 0.291, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4170", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.239, 0.329, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4171", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.159, 0.554, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_4172", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.414, 0.336, 0.444] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_4173", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[checkbox] 19\" Wheels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.181, 0.273, 0.205] in the image\nAnd my action is Action: TYPE\nValue: 60602"}]}, {"id": "mind2web_4174", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_40f87c26-9448-4531-b356-b08bfe0e831d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.342, 0.651, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4175", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK\n[button] Expand Upcoming -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.833, 0.059, 0.841] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4176", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\n[span] Help -> HOVER\n[div] Visit the help center -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.684, 0.607, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4177", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK\n[span] Most Reviewed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.236, 0.287, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4178", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.371, 0.454, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4179", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.004, 0.492, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4180", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.761, 0.143, 0.779] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4181", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.131, 0.293, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4182", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK\n[div] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.463, 0.281, 0.496, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4183", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.089, 0.128, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4184", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.119, 0.777, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4185", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK\n[link] 18 March 2023, Saturday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.355, 0.536, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4186", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.062, 0.56, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4187", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4188", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.217, 0.687, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4189", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse Movies by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.177, 0.278, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4190", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.0, 0.589, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4191", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.091, 0.56, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4192", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.151, 0.628, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4193", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.275, 0.566, 0.291] in the image\nAnd my action is Action: SELECT\nValue: 8 00 PM"}]}, {"id": "mind2web_4194", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.227, 0.123, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4195", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.167, 0.376, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4196", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.071, 0.566, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4197", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.028, 0.553, 0.049] in the image\nAnd my action is Action: TYPE\nValue: shirt"}]}, {"id": "mind2web_4198", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Adventure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 1.057, 0.225, 1.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4199", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.049, 0.248, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4200", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.553, 0.916, 0.571] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4201", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.306, 0.221, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4202", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[div] RANG BARSEY- HOLI MUSIC FESTIVAL, CINCINNATI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.461, 0.575, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4203", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.403, 0.089, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4204", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... -> CLICK\n[textbox] Email -> TYPE: lin.lon@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.308, 0.828, 0.333] in the image\nAnd my action is Action: TYPE\nValue: lin.lon@gmail.com"}]}, {"id": "mind2web_4205", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_7b1492bb-0d9e-4311-ba8d-402a5ed99076.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM\n[combobox] End Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.353, 0.3, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4206", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 1.114, 0.881, 1.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4207", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.338, 0.34, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4208", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.379, 0.331, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4209", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\n[link] NBA -> HOVER\n[link] Dallas Mavericks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.156, 0.137, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4210", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.218, 0.764, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4211", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.484, 0.868, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4212", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.411, 0.331, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4213", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! 
-> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.357, 0.508, 0.395] in the image\nAnd my action is Action: SELECT\nValue: Civic"}]}, {"id": "mind2web_4214", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.108, 0.047, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4215", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.149, 0.891, 0.178] in the image\nAnd my action is Action: SELECT\nValue: midnight"}]}, {"id": "mind2web_4216", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.179, 0.271, 0.2] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_4217", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK\n[button] DURATION -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.364, 0.861, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4218", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.118, 0.26, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4219", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23\n[textbox] Please cancel my order for the following products ... -> TYPE: Harry Potter Box Set\n[textbox] Reason for cancellation (optional) * -> TYPE: Not available at address"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.615, 0.759, 0.653] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4220", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Alaska -> CLICK\n[link] 1 National Heritage Area \u00bb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.41, 0.391, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4221", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.998, 0.192, 1.006] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4222", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4223", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.037, 0.746, 0.053] in the image\nAnd my action is Action: TYPE\nValue: Harry Potter"}]}, {"id": "mind2web_4224", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK\n[button] ADD TO CART -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.754, 0.953, 0.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4225", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.306, 0.341, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4226", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.363, 0.691, 0.378] in the image\nAnd my action is Action: SELECT\nValue: July 2023"}]}, {"id": "mind2web_4227", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.205, 0.807, 0.229] in the image\nAnd my action is Action: TYPE\nValue: boston"}]}, {"id": "mind2web_4228", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.336, 0.486, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4229", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.245, 0.766, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4230", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_d571bd82-235d-4db6-a852-0ad4320383e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK\n[button] Less than 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.338, 0.89, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4231", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.361, 0.829, 0.372] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_4232", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.192, 0.542, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4233", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.376, 0.36, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4234", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 1.51, 0.147, 1.53] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_4235", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] Postal code, address, store name -> TYPE: 10017\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.385, 0.997, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4236", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[svg] -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[svg] -> CLICK\n[button] Used for -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.132, 0.796, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4237", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_a1454cf5-5c62-4137-82c2-813f8dd4073c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.28, 0.93, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4238", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.484, 0.192, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4239", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_33cacba9-a4ac-459b-a7a0-8010dfef19e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK\n[button] Apply Now -> CLICK\n[textbox] Email * is a required field. -> TYPE: jacksparrow@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.781, 0.132, 0.824] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4240", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_d01a0aa8-14a8-454c-8544-dcc082a22324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_4241", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.104, 0.769, 0.139] in the image\nAnd my action is Action: TYPE\nValue: thomas.neo@gmail.com"}]}, {"id": "mind2web_4242", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. 
The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.278, 0.711, 0.307] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_4243", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.023, 0.665, 0.041] in the image\nAnd my action is Action: TYPE\nValue: rare books"}]}, {"id": "mind2web_4244", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_149c287a-3fb6-4483-929b-aee42e6e4527.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.015, 0.981, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4245", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\n[textbox] Search IMDb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.262, 0.156, 0.28] in the image\nAnd my action is Action: TYPE\nValue: The Flash"}]}, {"id": "mind2web_4246", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_5e79823e-7c1a-455c-afb4-c9a536f0c4ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.614, 0.184, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4247", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.104, 0.165, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4248", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[button] 4K -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.694, 0.703, 0.711] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4249", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.982, 0.355, 1.001] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4250", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.405, 0.233, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4251", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.285, 0.844, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4252", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.16, 0.85, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4253", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.312, 0.145, 0.332] in the image\nAnd my action is Action: SELECT\nValue: UFC"}]}, {"id": "mind2web_4254", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK\n[link] Football -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.566, 0.495, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4255", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.211, 0.29, 0.241] in the image\nAnd my action is Action: TYPE\nValue: bhz"}]}, {"id": "mind2web_4256", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK\n[link] RETIRED -> CLICK\n[textbox] Search by player name -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.289, 0.666, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4257", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.079, 0.106, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4258", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.252, 0.344, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4259", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.357, 0.196, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4260", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.089, 0.478, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4261", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.337, 0.615, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4262", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.141, 0.312, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4263", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.477, 0.393, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4264", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.688, 0.253, 0.722] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4265", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.861, 0.226, 0.873] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4266", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.147, 0.199, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4267", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.177, 0.568, 0.231] in the image\nAnd my action is Action: TYPE\nValue: national university of singapore"}]}, {"id": "mind2web_4268", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.394, 0.132, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4269", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4270", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.471, 0.944, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4271", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK\n[span] Digital -> CLICK\n[li] Deluxe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.357, 0.975, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4272", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! -> CLICK\n[link] Plan Your Visit \uf078 -> CLICK\n[link] Park Policies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 1.066, 0.106, 1.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4273", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.135, 0.326, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4274", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.237, 0.573, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4275", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK\n[button] filter by services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.347, 0.744, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4276", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.206, 0.409, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4277", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.296, 0.144, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4278", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.36, 0.981, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4279", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\n[button] SPORTS -> HOVER\n[tab] NFL -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.064, 0.455, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4280", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.577, 0.955, 0.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4281", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.079, 0.248, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4282", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.062, 0.208, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4283", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.373, 0.243, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4284", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.083, 0.157, 1.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4285", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.308, 0.725, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4286", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. Miami area -> CLICK\n[button] Explore flights -> CLICK\n[span] 234 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.511, 0.866, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4287", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_514bcfa3-e57a-4004-b98a-331a51bd1de1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.396, 0.347, 0.438] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_4288", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK\n[textbox] Flight destination input -> TYPE: PARIS\n[span] All airports -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.349, 0.641, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4289", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] Confirm Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] ZIP Code (required) -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.727, 0.754, 0.759] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4290", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.013, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4291", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.344, 0.104, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4292", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.023, 0.06, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4293", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.207, 0.931, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4294", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. -> CLICK\n[textbox] Enter a Location -> TYPE: brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.244, 0.743, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4295", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: SELECT\nValue: 4 Guests"}]}, {"id": "mind2web_4296", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\n[link] Jets signing former Packers QB Boyle to 1-year dea... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.42, 0.768, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4297", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.468, 0.153, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4298", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: mexican\n[span] Mexican -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 1.691, 0.631, 1.711] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4299", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.348, 0.277, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4300", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.369, 0.473, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4301", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK\n[radio] Key extraction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.462, 0.048, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4302", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.538, 0.554, 0.56] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_4303", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.0, 0.465, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4304", "image": {"bytes": "", "path": "./images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the latest news from rotten tomatoes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.077, 0.82, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4305", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.815, 0.934, 0.852] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4306", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_f5afe3cb-9632-40a4-a9e5-07bb9894e599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER\n[strong] Filters -> CLICK\n[checkbox] \uf0e7EV Charging -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.283, 0.328, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4307", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.136, 0.914, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4308", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.29, 0.693, 0.309] in the image\nAnd my action is Action: TYPE\nValue: 150"}]}, {"id": "mind2web_4309", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.291, 0.792, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4310", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.127, 0.47, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4311", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.145, 0.771, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4312", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.261, 0.471, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4313", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.259, 0.443, 0.276] in the image\nAnd my action is Action: TYPE\nValue: 12345678912345"}]}, {"id": "mind2web_4314", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.203, 0.5, 0.234] in the image\nAnd my action is Action: TYPE\nValue: 10023"}]}, {"id": "mind2web_4315", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.194, 0.539, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4316", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.339, 0.429, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4317", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.419, 0.173, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4318", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.141, 0.302, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4319", "image": {"bytes": "", "path": "./images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the design tool for a new home office.\nPrevious actions:\n[link] Design -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.773, 0.245, 0.781] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4320", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.026, 0.535, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4321", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.699, 0.277, 0.73] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4322", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] 1 room, 2 travelers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.241, 0.853, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4323", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK\n[link] Gorillaz -> CLICK\n[link] Buy a copy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.884, 0.118, 0.97, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4324", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.552, 0.417, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4325", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.235, 0.559, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4326", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.204, 0.438, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4327", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.905, 0.14, 0.956, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4328", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.233, 0.196, 0.254] in the image\nAnd my action is Action: SELECT\nValue: Ages 3-5 (31)"}]}, {"id": "mind2web_4329", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK\n[label] Up to 1 hour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.697, 0.236, 0.705] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4330", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.262, 0.446, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4331", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.406, 0.614, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4332", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.18, 0.782, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4333", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.31, 0.209, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4334", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.26, 0.568, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4335", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK\n[span] Buy Now -> CLICK\n[button] April 22, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.102, 0.464, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4336", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.744, 0.159, 0.881, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4337", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lave Hot Springs East KOA\n[list] KOA Logo Icon Lava Hot Springs West KOA Holiday La... -> CLICK\n[button] FIND A KOA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.487, 0.553, 0.541, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4338", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_fb6ad166-2b4e-4439-bcb9-4024694fe8fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... 
-> CLICK\n[option] United Airlines (UA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.295, 0.35, 0.322] in the image\nAnd my action is Action: TYPE\nValue: belo horizonte"}]}, {"id": "mind2web_4339", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.435, 0.25, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4340", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.368, 0.567, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4341", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.384, 0.62, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4342", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.026, 0.335, 0.042] in the image\nAnd my action is Action: TYPE\nValue: mexican"}]}, {"id": "mind2web_4343", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.399, 0.609, 0.422] in the image\nAnd my action is Action: TYPE\nValue: Directors"}]}, {"id": "mind2web_4344", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com\n[textbox] *Phone Number -> TYPE: 234567890"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.388, 0.566, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4345", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.25, 0.21, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4346", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.009, 0.189, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4347", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK\n[link] BY PHONE\u00a0NUMBER -> CLICK\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.37, 0.389, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4348", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.239, 0.71, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4349", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.267, 0.462, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4350", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.097, 0.866, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4351", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.245, 0.568, 0.267] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_4352", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_6ac79817-591c-4763-b856-e3a201786417.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. 
-> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.435, 0.972, 0.468] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_4353", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.327, 0.748, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4354", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.409, 0.262, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4355", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_aa68d1df-0533-407f-b2e3-6f8118babb0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.254, 0.625, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4356", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.189, 0.495, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4357", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK\n[span] Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.332, 0.466, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4358", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.228, 0.259, 0.259] in the image\nAnd my action is Action: TYPE\nValue: BWI"}]}, {"id": "mind2web_4359", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_d05fb7e9-a294-4741-a6df-073f2cd22866.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK\n[link] Bundle info -> CLICK\n[link] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.441, 0.467, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4360", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.614, 0.938, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4361", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.419, 0.359, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4362", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK\n[link] Neutral (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.316, 0.986, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4363", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.304, 0.501, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4364", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER\n[link] Most Played -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.168, 0.548, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4365", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00\n[combobox] AM or PM -> SELECT: PM\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.404, 0.874, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4366", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[button] 4K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.44, 0.429, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4367", "image": {"bytes": "", "path": "./images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out the cancellation policy\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.014, 0.659, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4368", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.18, 0.568, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4369", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK\n[label] Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.476, 0.848, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4370", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30\n[combobox] AM or PM -> SELECT: PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.6, 0.353, 0.635] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4371", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.717, 0.458, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4372", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Landscaping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: WEST HOLLYWOOD"}]}, {"id": "mind2web_4373", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.175, 0.463, 0.18] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_4374", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.326, 0.29, 0.348] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_4375", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.364, 0.066, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4376", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK\n[searchbox] City, State, or ZIP code -> TYPE: 10005\n[svg] -> CLICK\n[label] UNIQLO SOHO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.835, 0.325, 0.853] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4377", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.119, 0.969, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4378", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.299, 0.683, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4379", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.208, 0.42, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4380", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.177, 0.42, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4381", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.161, 0.594, 0.18] in the image\nAnd my action is Action: TYPE\nValue: Alien Worlds"}]}, {"id": "mind2web_4382", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_84cfe885-5fe0-4f65-bebd-c0f56bf02c16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.211, 0.492, 0.241] in the image\nAnd my action is Action: TYPE\nValue: ewn"}]}, {"id": "mind2web_4383", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.307, 0.077, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4384", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK\n[spinbutton] Enter Minimum Price -> TYPE: 0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.994, 0.121, 1.012] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_4385", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.225, 0.891, 0.26] in the image\nAnd my action is Action: SELECT\nValue: noon"}]}, {"id": "mind2web_4386", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.254, 0.397, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4387", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.006, 0.67, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4388", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.266, 0.694, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4389", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK\n[heading] Apple iPhone 12 Pro - 128GB - All Colors - Unlocke... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.179, 0.76, 0.188] in the image\nAnd my action is Action: SELECT\nValue: Blue"}]}, {"id": "mind2web_4390", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.206, 0.727, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 8"}]}, {"id": "mind2web_4391", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[button] Search flights + cruise External Link should open ... -> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.397, 0.972, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4392", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.121, 0.969, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4393", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.413, 0.686, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4394", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.012, 0.613, 0.044] in the image\nAnd my action is Action: TYPE\nValue: Star Wars The Mandalorian statue"}]}, {"id": "mind2web_4395", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[link] TICKETS -> CLICK\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK\n[input] -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.884, 0.23, 0.894, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4396", "image": {"bytes": "", "path": "./images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me news about the ps5.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.125, 0.677, 0.146] in the image\nAnd my action is Action: TYPE\nValue: ps5"}]}, {"id": "mind2web_4397", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.205, 0.331, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4398", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.409, 0.194, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4399", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 1.369, 0.492, 1.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4400", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.427, 0.416, 0.439, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4401", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.135, 0.08, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4402", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_ffd31f7b-a7cf-4c8b-9a96-5ca46768637c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK\n[span] Jun 5 -> CLICK\n[gridcell] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.09, 0.959, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4403", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.138, 0.158, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4404", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.084, 0.801, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4405", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK\n[span] Tomatometer -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.489, 0.805, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4406", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] 31 -> CLICK\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.15, 0.84, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4407", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.071, 0.664, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4408", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK\n[div] Sat, Mar 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.34, 0.627, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4409", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.336, 0.941, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4410", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.291, 0.452, 0.306] in the image\nAnd my action is Action: TYPE\nValue: MUMBAI"}]}, {"id": "mind2web_4411", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.437, 0.519, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4412", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.264, 0.375, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4413", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK\n[checkbox] Office location Office location -> CLICK\n[checkbox] Company success Company success -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.662, 0.829, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4414", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.498, 0.639, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4415", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK\n[option] US, CA, San Francisco -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.288, 0.472, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4416", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.149, 0.713, 0.166] in the image\nAnd my action is Action: TYPE\nValue: berlin"}]}, {"id": "mind2web_4417", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK\n[label] Dome -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.356, 0.488, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4418", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.533, 0.517, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4419", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_5bebb9b8-5943-41f5-b871-76d06302dbfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.113, 0.336, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4420", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 2.877, 0.279, 2.898] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4421", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.516, 0.067, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4422", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... 
-> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK\n[link] $259 -> CLICK\n[button] CONTINUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.258, 0.87, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4423", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.615, 0.34, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4424", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.344, 0.339, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4425", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.028, 0.98, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4426", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.067, 0.587, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4427", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.358, 0.365, 0.378] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_4428", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.309, 0.348, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4429", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High\n[checkbox] 4+ -> CLICK\n[button] Select Chevrolet Colorado Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.099, 0.951, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4430", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK\n[button] Close -> CLICK\n[button] First from $722 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.579, 0.945, 0.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4431", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.344, 0.533, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4432", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 
1 weekly charts ranked artist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.005, 0.838, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4433", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\n[link] Shopping Cart -> CLICK\n[textbox] qty -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.322, 0.523, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4434", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.114, 0.3, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4435", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.204, 0.249, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4436", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.484, 0.92, 0.503] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_4437", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue with this address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.641, 0.2, 0.663] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4438", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.021, 0.598, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4439", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.222, 0.656, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4440", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.231, 0.568, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4441", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\n[combobox] Search -> TYPE: frosthaven\n[link] Frosthaven (2023) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.248, 0.75, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4442", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] March 19, 2023 -> CLICK\n[spinbutton] How many travelers? 
-> TYPE: 3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.346, 0.919, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4443", "image": {"bytes": "", "path": "./images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a theatre near 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.145, 0.888, 0.17] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_4444", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.369, 0.811, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4445", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.189, 0.335, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4446", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK\n[textbox] Name of Recipient -> TYPE: April May"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.367, 0.929, 0.385] in the image\nAnd my action is Action: TYPE\nValue: april.may@gmail.com"}]}, {"id": "mind2web_4447", "image": {"bytes": "", "path": "./images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of Broadway events sorted by date.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.065, 0.17, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4448", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.012, 0.83, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4449", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK\n[option] Premium economy -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.454, 0.636, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4450", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK\n[link] UEFA CHAMPIONS LEAGUE -> CLICK\n[heading] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.266, 0.846, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4451", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.108, 0.223, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4452", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.036, 0.448, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4453", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado\n[button] Activity -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.287, 0.154, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4454", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK\n[li] Cheyenne, WY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.398, 0.578, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4455", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.411, 0.686, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4456", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.005, 0.789, 0.021] in the image\nAnd my action is Action: TYPE\nValue: Union City Nj"}]}, {"id": "mind2web_4457", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK\n[textbox] Search by keyword -> TYPE: Taylor Swift\n[combobox] Find Posts from -> SELECT: 1 Months Ago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.435, 0.223, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4458", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.31, 0.792, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4459", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. 
-> CLICK\n[li] Cuisine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.36, 0.164, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4460", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f5195da6-57eb-4def-a279-ec2069340b01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.269, 0.102, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4461", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.332, 0.144, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4462", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.516, 0.916, 0.551] in the image\nAnd my action is Action: TYPE\nValue: Happy Birthday Love"}]}, {"id": "mind2web_4463", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.031, 0.958, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4464", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.365, 0.634, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4465", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.484, 0.045, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4466", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.36, 0.29, 0.381] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_4467", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.272, 0.344, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4468", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.538, 0.271, 0.56] in the image\nAnd my action is Action: TYPE\nValue: WASHINGTON"}]}, {"id": "mind2web_4469", "image": {"bytes": "", "path": "./images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the newest on-demand releases.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.027, 0.614, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4470", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.039, 0.58, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4471", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.394, 0.249, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4472", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.062, 0.266, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4473", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7577fa72-6d3d-468b-9c75-a4dd8f2d35bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[textbox] Type a date, or use enter to open, escape to close... 
-> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK\n[img] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.689, 0.397, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4474", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.881, 0.04, 0.897] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4475", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.218, 0.151, 0.24] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_4476", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.071, 0.369, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4477", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_75341aa4-49d6-43ad-86f6-b82d2b6c95fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.119, 0.719, 0.163] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_4478", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK\n[link] Likely To Sell Out -> CLICK\n[svg] -> CLICK\n[span] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.21, 0.785, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4479", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.89, 0.205, 0.918, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4480", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.046, 0.785, 0.057] in the image\nAnd my action is Action: TYPE\nValue: stripe, 5th avenue"}]}, {"id": "mind2web_4481", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.095, 0.645, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4482", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK\n[img] 8GB (1x8GB) DDR3L 1600 (PC3L-12800) Desktop Memory... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.328, 0.852, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4483", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_d5d286c4-6f8e-4b7a-8fff-e50d13cf9ada.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall\n[span] South Allport Street, Chicago, IL, USA -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.257, 0.379, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4484", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.375, 0.754, 0.413] in the image\nAnd my action is Action: TYPE\nValue: allan.smith@gmail.com"}]}, {"id": "mind2web_4485", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.351, 0.782, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4486", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.172, 0.545, 0.196] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_4487", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.215, 0.114, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4488", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.456, 0.471, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4489", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Series -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.287, 0.253, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4490", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.438, 0.882, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4491", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.333, 0.396, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4492", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.533, 0.286, 0.555] in the image\nAnd my action is Action: SELECT\nValue: Good To Go"}]}, {"id": "mind2web_4493", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.163, 0.312, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4494", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.057, 0.682, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4495", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. 
-> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.32, 0.186, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4496", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.228, 0.908, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4497", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.0, 0.645, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4498", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_e69a6088-4873-41f8-be56-5ee72a989dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... -> CLICK\n[link] MONTHLY -> CLICK\n[span] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.226, 0.14, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4499", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.187, 0.341, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4500", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.321, 0.488, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4501", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.274, 0.471, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4502", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.241, 0.459, 0.27] in the image\nAnd my action is Action: TYPE\nValue: 456481897"}]}, {"id": "mind2web_4503", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.433, 0.375, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4504", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.611, 0.969, 0.619] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4505", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_21f8bfdc-d53f-40a3-8c4c-ee39f0f4a7d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach\n[em] Venice -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.128, 0.336, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4506", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\n[searchbox] Search... -> TYPE: Star Wars The Mandalorian statue\n[img] The Mandalorian ArtFX+ 1/10th Scale Statue - Manda... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.407, 0.969, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4507", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.184, 0.084, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4508", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.668, 0.698, 0.685] in the image\nAnd my action is Action: TYPE\nValue: doew"}]}, {"id": "mind2web_4509", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: red sox vs yankees"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.19, 0.871, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4510", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.284, 0.571, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4511", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... -> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.039, 0.938, 1.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4512", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] QAR\u00a04,980 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.499, 0.712, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4513", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.59, 0.238, 0.617] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4514", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.388, 0.365, 0.409] in the image\nAnd my action is Action: TYPE\nValue: san antonio"}]}, {"id": "mind2web_4515", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.155, 0.561, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4516", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.249, 0.573, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4517", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.074, 0.223, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4518", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.077, 0.777, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4519", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.962, 0.247, 0.974, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4520", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[span] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.148, 0.63, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4521", "image": {"bytes": "", "path": "./images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guide for paying with commuter benefits.\nPrevious actions:\n[p] About -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.017, 0.627, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4522", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.787, 0.089, 0.792] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4523", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... 
-> CLICK\n[a] Processors - Desktops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.775, 0.376, 0.804] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4524", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... -> CLICK\n[textbox] Last name -> TYPE: sin"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.329, 0.618, 0.36] in the image\nAnd my action is Action: TYPE\nValue: 1234567"}]}, {"id": "mind2web_4525", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK\n[searchbox] Search -> TYPE: Prometheus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.163, 0.677, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4526", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.172, 0.591, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4527", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.253, 0.5, 0.288] in the image\nAnd my action is Action: TYPE\nValue: nevada city"}]}, {"id": "mind2web_4528", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.423, 0.974, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4529", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.501, 0.32, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4530", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.489, 0.214, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4531", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 1.621, 0.312, 1.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4532", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)\n[combobox] Term Length -> SELECT: 48 months\n[link] SHOP WITH BUDGET -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.184, 0.288, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4533", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.137, 0.719, 0.165] in the image\nAnd my action is Action: TYPE\nValue: Coldplay"}]}, {"id": "mind2web_4534", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000\n[input] -> TYPE: 5000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.311, 0.473, 0.336] in the image\nAnd my action is Action: TYPE\nValue: 12"}]}, {"id": "mind2web_4535", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.183, 0.629, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4536", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.115, 0.737, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4537", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK\n[button] Select Nissan Kicks Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.318, 0.93, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4538", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.468, 0.281, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4539", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK\n[link] Opel Insignia\u00a0or Similar , View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.784, 0.298, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4540", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK\n[link] Adventure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.72, 0.432, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4541", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.112, 0.3, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4542", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.252, 0.194, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_4543", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.303, 0.309, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4544", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK\n[gridcell] April 10, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.293, 0.831, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4545", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.145, 0.701, 0.174] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_4546", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_d5c1426a-876f-4bb5-945e-be4c53a4afc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[option] Adele -> CLICK\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK\n[label] 2 -> CLICK\n[button] $3,535/ea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.213, 0.479, 0.244] in the image\nAnd my action is Action: TYPE\nValue: adelefan@hotmail.com"}]}, {"id": "mind2web_4547", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.048, 0.376, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4548", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.085, 0.342, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4549", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.658, 0.609, 0.673] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4550", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.276, 0.795, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4551", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK\n[textbox] Zip code -> TYPE: 90026"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 1.02, 0.108, 1.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4552", "image": {"bytes": "", "path": "./images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the highest average points scored in the current season\nPrevious actions:\n[button] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.231, 0.312, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4553", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.322, 0.422, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4554", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food\n[link] organic dog food -> CLICK\n[link] Tender & True Organic Small Breed Chicken and Live... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.424, 0.988, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4555", "image": {"bytes": "", "path": "./images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find career openings in the marketing department\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.242, 0.706, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4556", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.314, 0.128, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4557", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.016, 0.469, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4558", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK\n[button] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.444, 0.036, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4559", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.072, 0.377, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4560", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.14, 0.961, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4561", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.245, 0.473, 0.277] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_4562", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.136, 0.553, 0.158] in the image\nAnd my action is Action: TYPE\nValue: los angeles"}]}, {"id": "mind2web_4563", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.046, 0.838, 0.07] in the image\nAnd my action is Action: TYPE\nValue: smith"}]}, {"id": "mind2web_4564", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_aea31efd-c391-4099-a13e-3a9417cca68f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK\n[label] 2 -> CLICK\n[button] $3,535/ea -> CLICK\n[textbox] *Email Address -> TYPE: adelefan@hotmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.448, 0.64, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4565", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.128, 0.568, 0.164] in the image\nAnd my action is Action: TYPE\nValue: singapore"}]}, {"id": "mind2web_4566", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.148, 0.956, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4567", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.761, 0.372, 0.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4568", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.167, 0.331, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4569", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.278, 0.173, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4570", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. 
-> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.299, 0.641, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4571", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.025, 0.787, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4572", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.239, 0.331, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4573", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.577, 0.435, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4574", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.359, 0.353, 0.399] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_4575", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 1.019, 0.296, 1.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4576", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK\n[button] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.744, 0.178, 0.757] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4577", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_fb4c8a9d-fe56-4fa9-9c3f-9c8044e3e2c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. 
Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.481, 0.619, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4578", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK\n[div] Lowest Price -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.331, 0.968, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4579", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK\n[button] Arizona -> CLICK\n[link] Tucson / Lazydays KOA Resort\ue250 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.336, 0.659, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4580", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.163, 0.284, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4581", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK\n[button] TV Shows -> CLICK\n[span] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.486, 0.639, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4582", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK\n[searchbox] SEARCH BY KEYWORD -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.223, 0.959, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4583", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.744, 0.253, 0.783] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4584", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.14, 0.322, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4585", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_fba5ff31-33b7-42ac-81a2-29fd9779dc8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.295, 0.637, 0.322] in the image\nAnd my action is Action: TYPE\nValue: buenos aires"}]}, {"id": "mind2web_4586", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.406, 0.358, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4587", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.358, 0.963, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4588", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.194, 0.975, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4589", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.395, 0.5, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4590", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.559, 0.064, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4591", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.084, 0.705, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4592", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.007, 0.393, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4593", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.508, 0.139, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4594", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.262, 0.759, 0.29] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_4595", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.318, 0.395, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4596", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.827, 0.877, 0.871] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4597", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Genres -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 1.477, 0.152, 1.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4598", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.378, 0.36, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4599", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.522, 0.335, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4600", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 19 -> CLICK\n[combobox] Fare preference -> SELECT: First"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.437, 0.875, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4601", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John\n[input] -> TYPE: Smith\n[input] -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.805, 0.347, 0.852, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4602", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.013, 0.911, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4603", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_71b5600a-25ce-4afd-9929-242be8f64d5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.147, 0.26, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4604", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.215, 0.358, 0.249] in the image\nAnd my action is Action: SELECT\nValue: Lexus"}]}, {"id": "mind2web_4605", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.175, 0.902, 0.214] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_4606", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.109, 0.906, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4607", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA HOLIDAY CAMPGROUNDS \uf0da -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.66, 0.481, 0.665] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4608", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.023] in the image\nAnd my action is Action: TYPE\nValue: smartwatch"}]}, {"id": "mind2web_4609", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.02, 0.652, 0.035] in the image\nAnd my action is Action: TYPE\nValue: trash can automatic lid"}]}, {"id": "mind2web_4610", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.393, 0.62, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4611", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\n[link] Fantasy . -> CLICK\n[link] Fantasy Basketball -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.227, 0.221, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4612", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.296, 0.274, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4613", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Kevin Durant"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.13, 0.931, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4614", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[gridcell] Thu Aug 10 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.503, 0.78, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4615", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.248, 0.969, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4616", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.155, 0.463, 0.16] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_4617", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.237, 0.984, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4618", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.412, 0.365, 0.435] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_4619", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_a60ce1f0-5d1f-4a3b-a4fb-251b1c51f61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.172, 0.352, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4620", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK\n[tab] Price (low to high) -> CLICK\n[div] View Deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.303, 0.959, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4621", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.156, 0.294, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4622", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.057, 0.966, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4623", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.826, 0.055, 0.879, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4624", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.206, 0.487, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4625", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.259, 0.942, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4626", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.324, 0.759, 0.353] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_4627", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK\n[div] Anywhere -> CLICK\n[svg] -> CLICK\n[div] $ 150 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.346, 0.862, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4628", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK\n[DisclosureTriangle] All Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.203, 0.165, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4629", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.221, 0.179, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4630", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK\n[div] 16 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.125, 0.264, 1.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4631", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.023, 0.39, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4632", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK\n[button] See all -> CLICK\n[checkbox] Vegan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.588, 0.688, 0.618] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4633", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.243, 0.287, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4634", "image": {"bytes": "", "path": "./images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give me the IMDB recommendations of what to watch.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.33, 0.628, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4635", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_87659e64-52c9-4193-842b-6676a7d6bf73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium\n[span] SoFi Stadium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.128, 0.336, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4636", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... 
-> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.228, 0.739, 0.241] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_4637", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.377, 0.488, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4638", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.258, 0.193, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4639", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.438, 0.375, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4640", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.304, 0.777, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4641", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.393, 0.219, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4642", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.604, 0.833, 0.61] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4643", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.241, 0.789, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4644", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.205, 0.559, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4645", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.597, 0.745, 0.628] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4646", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.957, 0.047, 0.976] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4647", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.079, 0.722, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4648", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.332, 0.357, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4649", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK\n[link] Oak -> CLICK\n[button] Time Period Manufactured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.275, 0.752, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4650", "image": {"bytes": "", "path": "./images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the best odds to win the NBA title.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.092, 0.335, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4651", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.273, 0.847, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4652", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.167, 0.552, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4653", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.444, 0.32, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4654", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.867, 0.233, 0.898] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4655", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.455, 0.609, 0.494] in the image\nAnd my action is Action: TYPE\nValue: To Watch"}]}, {"id": "mind2web_4656", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[link] \uf18a SEE ALL ROUTES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.133, 0.655, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4657", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.264, 0.915, 0.284] in the image\nAnd my action is Action: SELECT\nValue: 04 30 PM"}]}, {"id": "mind2web_4658", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.232, 0.966, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4659", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK\n[span] Price per person -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.011, 0.808, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4660", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? 
-> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.168, 0.83, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4661", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first\n[div] Request Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.622, 0.682, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4662", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.077, 0.386, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4663", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.201, 0.192, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4664", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.341, 0.619, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4665", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK\n[combobox] Return Time -> SELECT: noon"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4666", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.128, 0.842, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4667", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK\n[link] Decorative lighting -> CLICK\n[img] LED candles -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.524, 0.938, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4668", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK\n[link] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.348, 0.614, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4669", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.101, 0.215, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4670", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.079, 0.948, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4671", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.01, 0.82, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4672", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.081, 0.97, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4673", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.822, 0.813, 0.859] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4674", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.002, 0.925, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4675", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK\n[textbox] Select End Date -> CLICK\n[gridcell] Sun Apr 23 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.144, 0.844, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4676", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK\n[combobox] To\u00a0 -> TYPE: North Station\n[link] T orange line green line D green line E commuter ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.248, 0.825, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4677", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.16, 0.85, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4678", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.7, 0.166, 0.737] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4679", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.349, 0.831, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4680", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.399, 0.959, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4681", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_f2307de6-5b1d-42b8-9bc9-b568deacc0e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK\n[img] Image for Red Hot Chili Peppers 2023 Tour -> CLICK\n[button] VIP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 1.01, 0.08, 1.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4682", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.615, 0.307, 0.627] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4683", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.278, 0.321, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4684", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.413, 0.509, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4685", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.147, 0.266, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4686", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.863, 0.829, 1.874] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4687", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.097, 0.677, 0.117] in the image\nAnd my action is Action: TYPE\nValue: Uncharted Legacy of Thieves Collection"}]}, {"id": "mind2web_4688", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.12, 0.571, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4689", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.309, 0.471, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4690", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.139, 0.491, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4691", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.01, 0.863, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4692", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Alagnak"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.12, 0.784, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4693", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz\n[input] -> TYPE: debbi.wo@bbt.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.905, 0.809, 0.923] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4694", "image": {"bytes": "", "path": "./images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the most popular photos of the Adam A-500 model aircraft.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.036, 0.742, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4695", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.341, 0.469, 0.361] in the image\nAnd my action is Action: TYPE\nValue: Calgary"}]}, {"id": "mind2web_4696", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.259, 0.493, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4697", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.046, 0.673, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4698", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.089, 0.128, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4699", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.028, 0.178, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4700", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.645, 0.408, 0.65] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4701", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[tab] Review score -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.596, 0.081, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4702", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Lave Hot Springs East KOA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.142, 0.489, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4703", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.849, 0.339, 0.865] in the image\nAnd my action is Action: TYPE\nValue: Debbi"}]}, {"id": "mind2web_4704", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.645, 0.476, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4705", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.005, 0.82, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4706", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150\n[textbox] Maximum price filter -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.438, 0.089, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4707", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.121, 0.434, 0.139] in the image\nAnd my action is Action: TYPE\nValue: Lansing"}]}, {"id": "mind2web_4708", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.343, 0.274, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4709", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.309, 0.337, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4710", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.398, 0.639, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4711", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.368, 0.157, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4712", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.064, 0.785, 0.082] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_4713", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK\n[combobox] Vaccination status Vaccination status -> CLICK\n[option] Fully vaccinated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.44, 0.944, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4714", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.285, 0.91, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4715", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.405, 0.494, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4716", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Purple -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.76, 0.233, 0.791] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4717", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.07, 0.988, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4718", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK\n[button] Add to cart\u2014$46.73 -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.456, 0.956, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4719", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.857, 0.798, 0.894] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4720", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.044, 0.43, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4721", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.258, 0.249, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4722", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.113, 0.83, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4723", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.207, 0.35, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4724", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.266, 0.495, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4725", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK\n[button] More ways to shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.06, 0.5, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4726", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... -> CLICK\n[a] Processors - Desktops -> CLICK\n[div] AMD Ryzen 5 5600 - Ryzen 5 5000 Series Vermeer (Ze... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 1.033, 0.689, 1.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4727", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.113, 0.969, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4728", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.937, 0.241, 0.956] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4729", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_b82ef559-4765-4aea-9ff7-59980a30a227.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.049, 0.633, 0.096] in the image\nAnd my action is Action: SELECT\nValue: 65"}]}, {"id": "mind2web_4730", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.091, 0.56, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4731", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.859, 0.438, 0.902, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4732", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK\n[link] Atlantis -> CLICK\n[button] \uf168 Add To Collection -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.372, 0.638, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4733", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.233, 0.097, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4734", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.445, 0.196, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4735", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 200\n[textbox] Maximum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.467, 0.176, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4736", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.304, 0.249, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4737", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] Customer Experience- Customer Services Agent (Cont... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.162, 0.844, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4738", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.242, 0.348, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4739", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK\n[textbox] City, State or Zip -> TYPE: 10001\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.047, 0.717, 0.056] in the image\nAnd my action is Action: SELECT\nValue: 500 Miles"}]}, {"id": "mind2web_4740", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_360a4b82-5666-4d87-8b10-2ea3b37f78ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK\n[textbox] Order number * -> TYPE: X123456789\n[textbox] E-mail used on order * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.657, 0.418, 0.766, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4741", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.026] in the image\nAnd my action is Action: TYPE\nValue: the weeknd"}]}, {"id": "mind2web_4742", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 1.233, 0.475, 1.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4743", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.198, 0.366, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4744", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_46d1aec9-efcf-40c7-bfeb-13d5c0db36c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[button] Search -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.221, 0.837, 0.245] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_4745", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.179, 0.262, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4746", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.163, 0.702, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4747", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.145, 0.42, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4748", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[checkbox] 19\" Wheels -> CLICK\n[textbox] Registration Zip Code Where you will register the ... -> TYPE: 60602"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.256, 0.282, 0.28] in the image\nAnd my action is Action: SELECT\nValue: 50 miles"}]}, {"id": "mind2web_4749", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_40c1286c-8c7e-4b32-b160-227c9e2ef1f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.808, 0.638, 0.842] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4750", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.135, 0.163, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4751", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.201, 0.481, 0.228] in the image\nAnd my action is Action: TYPE\nValue: Toronto"}]}, {"id": "mind2web_4752", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.28, 0.28, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4753", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Star Wars"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.011, 0.649, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4754", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.362, 0.342, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4755", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.022, 0.526, 0.05] in the image\nAnd my action is Action: TYPE\nValue: CHIANTI"}]}, {"id": "mind2web_4756", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.25, 0.796, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4757", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.414, 0.84, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4758", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.277, 0.479, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4759", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.258, 0.672, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4760", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.39, 0.605, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4761", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.186, 0.196, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4762", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_fd9fc151-a496-427a-ab5b-a46920baf5cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK\n[textbox] Order number -> TYPE: 24124124091\n[textbox] Email address -> TYPE: boobear@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.497, 0.872, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4763", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.387, 0.391, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4764", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.443, 0.868, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4765", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.739, 0.846, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4766", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 0.383, 0.35, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4767", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_9d601e7e-2e4f-42d0-a3d4-8e24b865fed4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.346, 0.256, 0.377] in the image\nAnd my action is Action: SELECT\nValue: Forward facing"}]}, {"id": "mind2web_4768", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.16, 0.254, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4769", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: professional boxing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.168, 0.392, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4770", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.523, 0.336, 0.55] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_4771", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a9c6510b-8e65-41cb-b24e-eee23f722354.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.095, 0.248, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4772", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10003"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.208, 0.514, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4773", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_bee75aa3-6f7d-4626-be6f-1b217ac16733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.776, 0.713, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4774", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.139, 0.267, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4775", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4776", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.26, 0.487, 0.296] in the image\nAnd my action is Action: TYPE\nValue: 252-654-5258"}]}, {"id": "mind2web_4777", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.196, 0.562, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4778", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.162, 0.273, 0.184] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_4779", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles & Hardware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.205, 0.375, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4780", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.251, 0.89, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4781", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.126, 0.902, 0.165] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_4782", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_29af2ad7-b801-42cd-93d0-f2c973573ee3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.384, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4783", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.463, 0.881, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4784", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\n[link] FAQs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.18, 0.328, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4785", "image": {"bytes": "", "path": "./images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get me info about planning a wedding cruise\nPrevious actions:\n[link] Weddings & Occasions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.815, 0.923, 0.833] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4786", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK\n[link] Exotic Cars -> CLICK\n[link] Explore Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.416, 0.139, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4787", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 3.96, 0.315, 4.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4788", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1\n[combobox] Passenger 1 -> SELECT: Adult (16-64)\n[combobox] Search by -> SELECT: Lowest fare"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.774, 0.895, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4789", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.224, 0.495, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4790", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.207, 0.364, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4791", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK\n[button] Category -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.515, 0.977, 0.535] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_4792", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.969, 0.284, 0.994] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4793", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Okay, got it. 
-> CLICK\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.996, 0.953, 1.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4794", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.54, 0.29, 0.561] in the image\nAnd my action is Action: TYPE\nValue: CDG"}]}, {"id": "mind2web_4795", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_36eaf5a7-66ea-447c-87c7-8db65126fffa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.396, 0.192, 0.438] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_4796", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.162, 0.753, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4797", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.839, 0.2, 0.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4798", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK\n[textbox] Passenger last name , required. -> TYPE: Smith\n[textbox] Confirmation / Record locator , required. 
-> TYPE: X899987799"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.483, 0.875, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4799", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.032, 0.547, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4800", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 1.09, 0.284, 1.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4801", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.368, 0.966, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4802", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.057, 0.746, 0.081] in the image\nAnd my action is Action: TYPE\nValue: Jk rowling"}]}, {"id": "mind2web_4803", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_fc909f7d-4360-4b36-8cb3-086b4a086b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. 
-> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.217, 0.687, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4804", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.114, 0.525, 0.128] in the image\nAnd my action is Action: TYPE\nValue: Tom Hanks"}]}, {"id": "mind2web_4805", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.363, 0.605, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4806", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] Valet (15) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.658, 0.458, 0.695] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4807", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york knicks\n[option] New York Knicks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.204, 0.941, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4808", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: Ballet"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.149, 0.871, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4809", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.348, 0.969, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4810", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_e0567d5e-e397-4a09-bfb2-83578e05ae26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.0, 0.417, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4811", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.368, 0.645, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4812", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 1.037, 0.479, 1.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4813", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.341, 0.619, 0.361] in the image\nAnd my action is Action: TYPE\nValue: EWR"}]}, {"id": "mind2web_4814", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.148, 0.95, 0.182] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_4815", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.184, 0.493, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4816", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK\n[combobox] Sort: -> SELECT: Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.391, 0.793, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4817", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.432, 0.441, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4818", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[label] Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.161, 0.92, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4819", "image": {"bytes": "", "path": "./images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what deals there are for Dish Outdoor.\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.112, 0.266, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4820", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK\n[span] Traveler Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.181, 0.565, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4821", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.592, 0.076, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4822", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.242, 0.316, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4823", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email -> TYPE: smith@gmail.com\n[combobox] Park of Preference -> SELECT: Six Flags Magic Mountain / Los Angeles, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.888, 0.678, 0.906] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4824", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: MILAN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.167, 0.326, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4825", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.162, 0.084, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4826", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food\n[link] organic dog food -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.738, 0.945, 0.763] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4827", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.448, 0.31, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4828", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.436, 0.331, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4829", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.794, 0.233, 0.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4830", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.342, 0.336, 0.375] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_4831", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.207, 0.495, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4832", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.033, 0.187, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4833", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.163, 0.376, 0.202] in the image\nAnd my action is Action: TYPE\nValue: madurai"}]}, {"id": "mind2web_4834", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.351, 0.702, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4835", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 1.078, 0.301, 1.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4836", "image": {"bytes": "", "path": "./images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music event organizers and follow the second one.\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.997, 0.316, 1.015] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4837", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.081, 0.757, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4838", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.15, 0.923, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4839", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.157, 0.478, 0.196] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_4840", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.581, 0.634, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4841", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Done -> CLICK\n[button] Back -> CLICK\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.63, 0.83, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4842", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.165, 0.292, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4843", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.113, 0.914, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4844", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.407, 0.375, 0.422] in the image\nAnd my action is Action: SELECT\nValue: New"}]}, {"id": "mind2web_4845", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.026, 0.93, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4846", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\n[link] FAQs -> CLICK\n[link] \uf2b1Cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.682, 0.879, 0.688] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4847", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK\n[div] Find a Store -> CLICK\n[link] FIND A STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.212, 0.657, 0.244] in the image\nAnd my action is Action: TYPE\nValue: 43240"}]}, {"id": "mind2web_4848", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.233, 0.765, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4849", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.687, 0.381, 0.783, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4850", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.217, 0.148, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4851", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.575, 0.37, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4852", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.189, 0.082, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4853", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.441, 0.709, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4854", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.503, 0.366, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4855", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Canyon de Chelly"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.118, 0.784, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4856", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.172, 0.183, 0.196] in the image\nAnd my action is Action: SELECT\nValue: Audi"}]}, {"id": "mind2web_4857", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.31, 0.078, 0.417, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4858", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.509, 0.492, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4859", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\n[link] Schedule -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.056, 0.387, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4860", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.224, 0.46, 0.25] in the image\nAnd my action is Action: TYPE\nValue: Boston Logan Airport"}]}, {"id": "mind2web_4861", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.094, 0.617, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4862", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.291, 0.627, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4863", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.365, 0.618, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4864", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA HOLIDAY CAMPGROUNDS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.659, 0.282, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4865", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.574, 0.278, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4866", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.007, 0.475, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4867", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.477, 0.347, 0.519] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_4868", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4869", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? 
-> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.216, 0.89, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4870", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.109, 0.389, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4871", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.49, 0.24, 0.518] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_4872", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.361, 0.474, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4873", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers\n[link] car accident lawyers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.096, 0.683, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4874", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.268, 0.259, 0.304] in the image\nAnd my action is Action: TYPE\nValue: SPRINGFIELD"}]}, {"id": "mind2web_4875", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4876", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.665, 0.659, 0.677] in the image\nAnd my action is Action: TYPE\nValue: 123-999-0000"}]}, {"id": "mind2web_4877", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.0, 0.313, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4878", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.12, 0.338, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4879", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.478, 0.688, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4880", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.383, 0.246, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4881", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK\n[span] EXPLORE DINING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.371, 0.055, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4882", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK\n[link] External Solid State Drives -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.151, 0.979, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4883", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.679, 0.2, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4884", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK\n[checkbox] 29 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.174, 0.923, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4885", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.077, 0.777, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4886", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK\n[link] Visit our cruise deals page to view your offers -> CLICK\n[button] Show My Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.455, 0.323, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4887", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.482, 0.218, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4888", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4889", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.09, 0.327, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4890", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 2.792, 0.069, 2.819] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4891", "image": {"bytes": "", "path": "./images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are today's Limited Time Offers deals?\nPrevious actions:\n[link] Coupons & Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.352, 0.096, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4892", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.056, 0.434, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4893", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.261, 0.212, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4894", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.152, 0.474, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4895", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.265, 0.156, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4896", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_cdfc251d-d069-4874-9855-405b68bd27ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK\n[text] J10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.82, 0.591, 0.889] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4897", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK\n[textbox] max price $ -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.427, 0.495, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4898", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.178, 0.574, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4899", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.523, 1.0, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4900", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. 
-> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.357, 0.385, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4901", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_9a38f090-86ba-4472-b089-7737200bcfaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.23, 0.606, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4902", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.218, 0.459, 0.244] in the image\nAnd my action is Action: TYPE\nValue: 10000"}]}, {"id": "mind2web_4903", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande\n[button] Search -> CLICK\n[a] -> CLICK\n[button] Sorted by: Last 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.558, 0.656, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4904", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.382, 0.963, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4905", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.149, 0.782, 0.177] in the image\nAnd my action is Action: TYPE\nValue: New york yankees"}]}, {"id": "mind2web_4906", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.182, 0.487, 0.218] in the image\nAnd my action is Action: TYPE\nValue: 05/05/1995"}]}, {"id": "mind2web_4907", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.057, 0.966, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4908", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.308, 0.428, 0.332] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_4909", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.404, 0.393, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4910", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.028, 0.837, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4911", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK\n[input] -> TYPE: 1648926800\n[select] All -> SELECT: Hindi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.859, 0.383, 0.969, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4912", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.479, 0.512, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4913", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.191, 0.485, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4914", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.007, 0.781, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4915", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.048, 0.201, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4916", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.29, 0.286, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4917", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 1.16, 0.823, 1.183] in the image\nAnd my action is Action: SELECT\nValue: New Jersey"}]}, {"id": "mind2web_4918", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.232, 0.316, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4919", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.466, 0.196, 0.488] in the image\nAnd my action is Action: SELECT\nValue: Hindi (59)"}]}, {"id": "mind2web_4920", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK\n[li] Cheyenne, WY, USA -> CLICK\n[li] Helena, Montana -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.705, 0.573, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4921", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] A person holding a tomato and a grocery bag with a... -> CLICK\n[button] Next -> CLICK\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.737, 0.83, 0.751] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4922", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.339, 0.216, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4923", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.064, 0.442, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4924", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 1.0, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4925", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.004, 0.651, 0.02] in the image\nAnd my action is Action: TYPE\nValue: surge protector"}]}, {"id": "mind2web_4926", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[span] Landscaping -> CLICK\n[textbox] Near -> TYPE: WEST HOLLYWOOD\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.553, 0.075, 0.65, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4927", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.744, 0.254, 0.767, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4928", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 2.539, 0.227, 2.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4929", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.488, 0.648, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4930", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.259, 0.742, 0.276] in the image\nAnd my action is Action: TYPE\nValue: Lo"}]}, {"id": "mind2web_4931", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK\n[link] View store directory -> CLICK\n[link] Texas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.249, 0.559, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4932", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK\n[option] Price (Low to High) -> CLICK\n[heading] La Quinta Inn & Suites by Wyndham Houston Hobby Ai... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.403, 0.953, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4933", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.805, 0.766, 0.834] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4934", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Under Armour Men's Charged Assert 9 Running Shoe -> CLICK\n[button] 7 X-Wide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.42, 0.869, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4935", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.018, 0.567, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4936", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.242, 0.713, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4937", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! -> CLICK\n[link] Plan Your Visit \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.749, 0.328, 0.894, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4938", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.087, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_4939", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.027, 0.524, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4940", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.186, 0.515, 0.225] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_4941", "image": {"bytes": "", "path": "./images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with information about luggage and what to bring.\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.686, 0.262, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4942", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Premium -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.337, 0.331, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4943", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. -> CLICK\n[img] undefined -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.545, 0.953, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4944", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[div] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.501, 0.645, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4945", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.388, 0.875, 0.422] in the image\nAnd my action is Action: SELECT\nValue: First"}]}, {"id": "mind2web_4946", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK\n[button] To make this website accessible to screen reader, ... -> CLICK\n[link] To make this website accessible to screen reader, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.114, 0.879, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4947", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.015] in the image\nAnd my action is Action: TYPE\nValue: Selena Gomez"}]}, {"id": "mind2web_4948", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_411cc346-144b-4861-ba9e-c9395f8f0598.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: Davis"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.417, 0.895, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4949", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.196, 0.359, 0.214] in the image\nAnd my action is Action: TYPE\nValue: staten island"}]}, {"id": "mind2web_4950", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.109, 0.104, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4951", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.914, 0.263, 0.921] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4952", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.094, 0.668, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4953", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.283, 0.312, 0.306] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_4954", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.489, 0.795, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4955", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.24, 0.158, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4956", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.034, 0.468, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4957", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.036, 0.652, 0.063] in the image\nAnd my action is Action: TYPE\nValue: laptop"}]}, {"id": "mind2web_4958", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.265, 0.652, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4959", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\n[link] Jets signing former Packers QB Boyle to 1-year dea... -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.374, 0.768, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4960", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.262, 0.443, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4961", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.355, 0.724, 0.401] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_4962", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.083, 0.487, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4963", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.031, 0.535, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4964", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.587, 0.465, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4965", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.202, 0.438, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4966", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.243, 0.325, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4967", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK\n[button] All -> CLICK\n[label] Children\u2019s Program -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.583, 0.161, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4968", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.513, 0.688, 0.531] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4969", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 3.278, 0.183, 3.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4970", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.311, 0.312, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4971", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\n[button] Travel -> CLICK\n[link] Travel Backpacks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.871, 0.11, 0.884] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4972", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.595, 0.294, 0.608] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4973", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.362, 0.568, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4974", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.149, 0.623, 0.179] in the image\nAnd my action is Action: TYPE\nValue: women t-shirts"}]}, {"id": "mind2web_4975", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.017, 0.509, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4976", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.103, 0.16, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4977", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\n[combobox] Search products and services -> TYPE: rogaine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.246, 0.33, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4978", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.327, 0.471, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4979", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.181, 0.89, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4980", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Last of Us\n[link] The Last of Us The Last of Us 2023 Pedro Pascal, B... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.502, 0.942, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4981", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.277, 0.41, 0.307] in the image\nAnd my action is Action: TYPE\nValue: udupi"}]}, {"id": "mind2web_4982", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.519, 0.851, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4983", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.199, 0.923, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4984", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.229, 0.343, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4985", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.399, 0.245, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4986", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.607, 0.087, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4987", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.528, 0.481, 0.573, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4988", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4989", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 2.699, 0.194, 2.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4990", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.567, 0.867, 0.697] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4991", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 1.622, 0.277, 1.637] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4992", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.195, 0.641, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4993", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. 
-> TYPE: caldwell\n[div] Caldwell -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.658, 0.158, 0.676] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4994", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.479, 0.255, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4995", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.983, 0.755, 0.997] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4996", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK\n[link] Soccer . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.118, 0.391, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4997", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.102, 0.713, 0.115] in the image\nAnd my action is Action: TYPE\nValue: Athens, Attica, Greeece"}]}, {"id": "mind2web_4998", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.684, 0.787, 0.696] in the image\nAnd my action is Action: TYPE\nValue: 8888888888"}]}, {"id": "mind2web_4999", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.257, 0.153, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5000", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.01, 0.673, 0.01] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5001", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.337, 0.148, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5002", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.044, 0.243, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5003", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 1.988, 0.192, 2.005] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5004", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.515, 0.389, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5005", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.811, 0.192, 0.816] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5006", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK\n[img] Men's UA Tech\u2122 2.0 Short Sleeve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.344, 0.924, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5007", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5008", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK\n[link] COVID-19 testing Schedule a COVID-19 test -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.283, 0.844, 0.308] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_5009", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.287, 0.717, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5010", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_b4b0eb8a-8008-4e51-8b11-0f2dc0fb6013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.969, 0.702, 1.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5011", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.067, 0.441, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5012", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.464, 0.335, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5013", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_52e1bc08-aa96-46ba-aa6d-d0191b7a51a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK\n[searchbox] Address, station, landmark -> CLICK\n[searchbox] Address, station, landmark -> TYPE: 07055"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.235, 0.366, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5014", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.122, 0.203, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5015", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.073, 0.206, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5016", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.149, 0.5, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5017", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.031, 0.578, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5018", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK\n[link] Game Reviews -> CLICK\n[combobox] Platform -> SELECT: PC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.599, 0.291, 0.625] in the image\nAnd my action is Action: SELECT\nValue: Sort by Score"}]}, {"id": "mind2web_5019", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.08, 0.195, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5020", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.463, 0.758, 0.482, 0.776] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5021", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK\n[span] Most Reviewed -> CLICK\n[button] Verified License -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.38, 0.626, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5022", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.413, 0.715, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5023", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\n[button] DESTINATIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.224, 0.922, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5024", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.057, 0.966, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5025", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_c8297402-4dc8-4983-9ec1-6ee82f468ab6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: SE4\n[span] Columbia, SC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.212, 0.98, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5026", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.843, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5027", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.13, 0.953, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5028", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.206, 0.612, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5029", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK\n[link] Shop red -> CLICK\n[button] Show filter modal Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.511, 0.563, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5030", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER\n[link] Grocery -> CLICK\n[link] Bread -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.172, 0.122, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5031", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.355, 0.247, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5032", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK\n[button] Show 10 Results -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.569, 0.372, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5033", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.016, 0.497, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5034", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.376, 0.656, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5035", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.192, 0.716, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5036", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.167, 0.877, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5037", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.351, 0.553, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5038", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.178, 0.432, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5039", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.342, 0.478, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5040", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.314, 0.419, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5041", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.452, 0.235, 0.47] in the image\nAnd my action is Action: SELECT\nValue: 2023"}]}, {"id": "mind2web_5042", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.004, 0.791, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5043", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow\n[span] Glasgow Central -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5044", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK\n[button] Abbotsford, WI \ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.034, 0.13, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5045", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.107, 0.914, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5046", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.677, 0.813, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5047", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.739, 0.48, 0.764] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5048", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.0, 0.559, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5049", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan\n[textbox] Search -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.24, 0.207, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5050", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.729, 0.278, 0.741] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5051", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.604, 0.787, 0.616] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_5052", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? -> TYPE: Dubai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.279, 0.729, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5053", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.275, 0.52, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5054", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.016, 0.673, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5055", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.013, 0.418, 0.029] in the image\nAnd my action is Action: TYPE\nValue: high tide"}]}, {"id": "mind2web_5056", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.19, 0.219, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5057", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.395, 0.574, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5058", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.348, 0.956, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5059", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.091, 0.281, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5060", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.337, 0.353, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5061", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.295, 0.709, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5062", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 1.431, 0.176, 1.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5063", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.213, 0.313, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5064", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\n[combobox] Search query -> TYPE: phoenix suns\n[link] Phoenix Phoenix NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.859, 0.976, 0.942] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5065", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK\n[button] More ways to shop -> CLICK\n[link] Send gifts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.383, 0.178, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5066", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.551, 0.367, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5067", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.219, 0.571, 0.252] in the image\nAnd my action is Action: TYPE\nValue: New Delhi"}]}, {"id": "mind2web_5068", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.162, 0.76, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5069", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurant in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.021, 0.228, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5070", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.256, 0.716, 0.279] in the image\nAnd my action is Action: TYPE\nValue: Seattle, WA"}]}, {"id": "mind2web_5071", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.312, 0.891, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5072", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color available in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.004, 0.686, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5073", "image": {"bytes": "", "path": "./images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of Broadway events sorted by date.\nPrevious actions:\n[link] broadway. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.114, 0.894, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5074", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.202, 0.601, 0.247] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_5075", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_c1406787-2858-4cfd-9ff0-18dd822ec56a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.33, 0.384, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5076", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES\n[input] -> TYPE: smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.581, 0.347, 0.902, 0.387] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_5077", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.301, 0.435, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5078", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.123, 0.99, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5079", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.659, 0.944, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5080", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.186, 0.223, 0.2] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_5081", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.005, 0.873, 0.022] in the image\nAnd my action is Action: TYPE\nValue: sports wear"}]}, {"id": "mind2web_5082", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.039, 0.838, 0.06] in the image\nAnd my action is Action: TYPE\nValue: ukulele"}]}, {"id": "mind2web_5083", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.648, 0.29, 0.668] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5084", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.168, 0.657, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5085", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.371, 0.518, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5086", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.15, 0.546, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5087", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.089, 0.262, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5088", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.848, 0.262, 0.989, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5089", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.411, 0.482, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5090", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.469, 0.153, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5091", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.582, 0.975, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5092", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Reggae -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.243, 0.655, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5093", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.295, 0.048, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5094", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.377, 0.263, 1.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5095", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.467, 0.278, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5096", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_919527a3-b5dd-493f-ab2b-dc9cc35b2cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK\n[link] Showtimes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.15, 0.559, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5097", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.202, 0.579, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5098", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.326, 0.969, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5099", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Toronto"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.293, 0.582, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5100", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.443, 0.15, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5101", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK\n[link] Self-Rising Crust Uncured Pepperoni Frozen Pizza -... -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.593, 0.775, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5102", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\n[textbox] Search TV Shows and Movies... 
-> TYPE: Titanic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.12, 0.594, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5103", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BMW cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.244, 0.489, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5104", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr 7 to Apr 8.\nPrevious actions:\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK\n[button] Search -> CLICK\n[menuitem] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.382, 0.547, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5105", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corolla, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.122, 0.712, 0.135] in the image\nAnd my action is Action: SELECT\nValue: L Sedan 4D"}]}, {"id": "mind2web_5106", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an offer for a black Honda with VIN number 1HGCM66543A064159 with 155000 mileage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.427, 0.31, 0.443] in the image\nAnd my action is Action: TYPE\nValue: 155000"}]}, {"id": "mind2web_5107", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.294, 0.431, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5108", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.078, 0.294, 0.091] in the image\nAnd my action is Action: TYPE\nValue: 49107"}]}, {"id": "mind2web_5109", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.148, 0.637, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5110", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.424, 0.943, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5111", "image": {"bytes": "", "path": "./images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is trending now on AMC on-demand?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.027, 0.633, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5112", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 1.071, 0.231, 1.083] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5113", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the actor who won the Oscar for best actor in a supporting role in 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.763, 0.779, 0.776] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5114", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.677, 0.325, 0.845, 0.351] in the image\nAnd my action is Action: TYPE\nValue: south station"}]}, {"id": "mind2web_5115", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.024, 0.054, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5116", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.303, 0.077, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5117", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.616, 0.089, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5118", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.816, 0.5, 0.825] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5119", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.345, 0.281, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5120", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK\n[radio] No -> CLICK\n[radio] Yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 1.115, 0.793, 1.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5121", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.326, 0.492, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5122", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.389, 0.131, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5123", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.289, 0.942, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5124", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.309, 0.777, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5125", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.591, 0.787, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5126", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.053, 0.054, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5127", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.198, 0.414, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5128", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.508, 0.278, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5129", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.228, 0.481, 0.259] in the image\nAnd my action is Action: TYPE\nValue: NYC"}]}, {"id": "mind2web_5130", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.257, 0.459, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5131", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.273, 0.409, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5132", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_eb8ba29f-90ce-4615-8e5b-92f140b46bf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.756, 0.207, 0.796, 0.231] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_5133", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.453, 0.846, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5134", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_5b2e46d2-a778-4bd0-9ba5-fbd526c7c17e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (10) -> CLICK\n[button] Show 10 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.404, 0.397, 0.417] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_5135", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.06, 0.97, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5136", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.036, 0.646, 0.049] in the image\nAnd my action is Action: TYPE\nValue: Panini Diamonds Kings Baseball cards"}]}, {"id": "mind2web_5137", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.016, 0.891, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5138", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[span] Pick a date... -> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK\n[button] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.199, 0.573, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_5139", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.365, 0.28, 0.405] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_5140", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.237, 0.77, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5141", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in the cheapest hotel in Niagara Falls for three adults and one three-year-old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.465, 0.611, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5142", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox One console in white color and available in stock.\nPrevious actions:\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.514, 0.177, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5143", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK\n[checkbox] Superhero Sci Fi (745) -> CLICK\n[checkbox] Based On Comic Book (226) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.171, 0.313, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5144", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.186, 0.652, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5145", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.24, 0.464, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5146", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK\n[option] 2 -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.46, 0.491, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5147", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345\n[button] Add flight -> CLICK\n[textbox] Email address * -> TYPE: ian.lo@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.632, 0.189, 0.651] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5148", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019\n[select] All -> SELECT: UFC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.3, 0.299, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5149", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.344, 0.526, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5150", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.24, 0.324, 0.26] in the image\nAnd my action is Action: SELECT\nValue: i3"}]}, {"id": "mind2web_5151", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK\n[button] Annual Pass Internet Order Questions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 1.563, 0.95, 1.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5152", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_f320b86a-6600-4949-8b57-4df2b3f9b664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[tab] Fastest -> CLICK\n[button] See flight -> CLICK\n[button] Select -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.668, 0.617, 0.712] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5153", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california\n[button] Search. 
-> CLICK\n[div] Modesto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.81, 0.465, 0.837] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5154", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.259, 0.317, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5155", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.343, 0.352, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5156", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 1.161, 0.208, 1.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5157", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK\n[gridcell] Mon May 01 2023 -> CLICK\n[gridcell] Sun May 07 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.503, 0.78, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5158", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.327, 0.027, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5159", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.044, 0.222, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5160", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK\n[combobox] Return Time -> SELECT: noon\n[textbox] Return to same location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.329, 0.567, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5161", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.409, 0.219, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5162", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Mikal Bridges"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.233, 0.931, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5163", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? -> TYPE: Dubai\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.379, 0.103, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5164", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.003, 0.031, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5165", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_b1b1da1b-a169-4ae8-8540-090e1735abfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.259, 0.264, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5166", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.952, 0.278, 0.994, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5167", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.498, 0.406, 0.596] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5168", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.423, 0.648, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5169", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.442, 0.233, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5170", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.147, 0.216, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5171", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.122, 0.389, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5172", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.211, 0.89, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5173", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK\n[textbox] Confirmation number (required) -> TYPE: 10000002"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.406, 0.446, 0.429] in the image\nAnd my action is Action: TYPE\nValue: Son"}]}, {"id": "mind2web_5174", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.295, 0.573, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5175", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 2.983, 0.353, 2.996] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5176", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_9f90d836-c2d6-473c-9f78-705033f9ec52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.347, 0.557, 0.387] in the image\nAnd my action is Action: TYPE\nValue: Walker"}]}, {"id": "mind2web_5177", "image": {"bytes": "", "path": "./images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a coffee shop with wi-fi.\nPrevious actions:\n[textbox] Find -> TYPE: coffee shop\n[span] Coffee Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.522, 0.363, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5178", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Movers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.016, 0.564, 0.025] in the image\nAnd my action is Action: TYPE\nValue: HONOLULU"}]}, {"id": "mind2web_5179", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.11, 0.326, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5180", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 1.172, 0.26, 1.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5181", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.193, 0.573, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5182", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.307, 0.927, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5183", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Golden State Warriors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.148, 0.931, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5184", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.448, 0.391, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5185", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.13, 0.587, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5186", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.459, 0.588, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5187", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.355, 0.502, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5188", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.485, 0.253, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5189", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.109, 0.418, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5190", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.179, 0.548, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5191", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_602e1f0b-b86e-4f60-8c92-40b3aece1274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... 
-> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.697, 0.452, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5192", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.409, 0.781, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5193", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.683, 0.95, 0.716] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_5194", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.105, 0.266, 1.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5195", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california\n[button] Search. -> CLICK\n[div] Modesto -> CLICK\n[link] Visit Modesto store details. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.233, 0.46, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5196", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Carribbean"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.079, 0.661, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5197", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.712, 0.622, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5198", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.317, 0.915, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5199", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_36d6ee3c-4be7-4367-a004-181ac15edf7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.208, 0.909, 0.235] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5200", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.071, 0.129, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5201", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.482, 0.355, 0.494] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_5202", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.529, 0.102, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5203", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.183, 0.592, 0.201] in the image\nAnd my action is Action: TYPE\nValue: radio city music hall"}]}, {"id": "mind2web_5204", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.07, 0.111, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5205", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.0, 0.589, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5206", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.462, 0.246, 0.496] in the image\nAnd my action is Action: TYPE\nValue: 30"}]}, {"id": "mind2web_5207", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[combobox] SORT BY -> SELECT: Price: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.269, 0.914, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5208", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.541, 0.448, 0.588] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5209", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.064, 0.436, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5210", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\n[link] Visit the See A Movie page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.198, 0.912, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5211", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.071, 0.354, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5212", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees\n[option] New York Yankees -> CLICK\n[link] TICKETS -> CLICK\n[div] More Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.744, 0.316, 0.834, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5213", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.473, 0.705, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5214", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK\n[textbox] Budget. 
Please enter a numerical value -> TYPE: 1300\n[div] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.645, 0.94, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5215", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.381, 0.959, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5216", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.37, 0.934, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5217", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid\n[link] Real Madrid LaLiga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.19, 0.093, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5218", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.044, 0.493, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5219", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK\n[button] BOOKMARK -> CLICK\n[button] OKAY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.564, 0.698, 0.645, 0.708] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5220", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.255, 0.84, 0.287] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_5221", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.199, 0.866, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5222", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.46, 0.981, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5223", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[span] Luggage -> CLICK\n[span] Carry-on Luggage -> CLICK\n[img] -> CLICK\n[svg] -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.132, 0.777, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5224", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_a991029f-599d-41f2-ad44-8fdc236dbc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.435, 0.937, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5225", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_011c68d2-620f-46fc-a2cd-48650f992b99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.514, 0.977, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5226", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.201, 0.573, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5227", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.522, 0.245, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5228", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.206, 0.888, 0.235] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_5229", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.198, 0.932, 0.231] in the image\nAnd my action is Action: TYPE\nValue: PS4"}]}, {"id": "mind2web_5230", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John\n[input] -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.321, 0.688, 0.347] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5231", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.826, 0.34, 0.849] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5232", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.192, 0.565, 0.224] in the image\nAnd my action is Action: SELECT\nValue: PS5"}]}, {"id": "mind2web_5233", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 1.083, 0.149, 1.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5234", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_413185f7-a793-4718-93ff-3c654b9f1c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK\n[link] Find Us -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.097, 0.216, 0.13] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_5235", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_5236", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.219, 0.381, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5237", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi\n[button] Search -> CLICK\n[link] M.S. Subbulakshmi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.425, 0.192, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5238", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4f6b4130-a0b4-40e7-b91f-ffe8a6443083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.27, 0.891, 0.304] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_5239", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.224, 0.539, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5240", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.192, 0.656, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5241", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[textbox] Find A Theatre -> TYPE: 10001\n[generic] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.306, 0.14, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5242", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.037] in the image\nAnd my action is Action: TYPE\nValue: 8GB Ram"}]}, {"id": "mind2web_5243", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.467, 0.147, 0.475] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_5244", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.336, 0.567, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5245", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.699, 0.089, 0.704] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5246", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.364, 0.616, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5247", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.343, 0.045, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5248", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK\n[checkbox] ORANGE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.279, 0.843, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5249", "image": {"bytes": "", "path": "./images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL passing touchdown season stats.\nPrevious actions:\n[link] NFL . -> HOVER\n[link] Stats . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.85, 0.125, 0.86] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5250", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK\n[label] Accessible Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.282, 0.359, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5251", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK\n[checkbox] 4+ doors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 1.214, 0.373, 1.224] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_5252", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.275, 0.196, 0.296] in the image\nAnd my action is Action: SELECT\nValue: In Stock (7,640)"}]}, {"id": "mind2web_5253", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: playstation gift card $10\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.343, 0.179, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5254", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7c35e712-8cec-40c1-baa4-93011d5f7d76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.443, 0.127, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5255", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.088, 0.128, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5256", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.044, 0.615, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5257", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.349, 0.779, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5258", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK\n[p] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.332, 0.72, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5259", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.21, 0.5, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5260", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... 
-> TYPE: Harry Potter"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.064, 0.863, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5261", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.377, 0.177, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5262", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK\n[button] APPLY -> CLICK\n[span] Bluetooth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.855, 0.192, 0.871] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5263", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.591, 0.829, 0.598] in the image\nAnd my action is Action: TYPE\nValue: seattle"}]}, {"id": "mind2web_5264", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: PlayStation 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.041, 0.378, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5265", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.373, 0.494, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5266", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.501, 0.43, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5267", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK\n[link] Under 2'x3' (38) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.374, 0.974, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5268", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.079, 0.106, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5269", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $21.99/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.869, 0.777, 0.894] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5270", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.26, 0.966, 0.288] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_5271", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.308, 0.481, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5272", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.175, 0.474, 0.196] in the image\nAnd my action is Action: TYPE\nValue: Bay Shore"}]}, {"id": "mind2web_5273", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] Confirm Email Address (required) -> TYPE: allan.smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.504, 0.754, 0.541] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_5274", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_749c3a9d-c4aa-4517-8e1f-ec5fa5845eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] American -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.424, 0.812, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5275", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.305, 0.754, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5276", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles \n[div] Los Angeles -> CLICK\n[path] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.145, 0.237, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5277", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.218, 0.154, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5278", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5279", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK\n[link] sit on a parent\u2019s lap -> CLICK\n[link] approved car seat -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.483, 0.918, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5280", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK\n[button] 2023-04-05 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.409, 0.908, 0.439] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_5281", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_73592b67-ffcd-4021-8342-1ec06c4a56ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... -> CLICK\n[link] MONTHLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.53, 0.328, 0.557] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5282", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.778, 0.159, 0.806, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5283", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK\n[textbox] min price $ -> TYPE: 200\n[textbox] max price $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.688, 0.786, 0.722] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5284", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK\n[link] Any length -> CLICK\n[link] 10-30 min -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.425, 0.108, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5285", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.234, 0.599, 0.261] in the image\nAnd my action is Action: TYPE\nValue: 10019"}]}, {"id": "mind2web_5286", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] To Autocomplete selection. Enter your place. 
-> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.383, 0.94, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5287", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.206, 0.266, 0.237] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn Central"}]}, {"id": "mind2web_5288", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.012, 0.265, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5289", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.175, 0.834, 0.194] in the image\nAnd my action is Action: SELECT\nValue: Publication date, new to old"}]}, {"id": "mind2web_5290", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.698, 0.329, 0.737] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5291", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_4344eb2f-250c-4aba-b34e-a5555f30c841.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. 
Louis Cardinals -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.658, 0.458, 0.695] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5292", "image": {"bytes": "", "path": "./images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of injured NBA players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.092, 0.335, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5293", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.357, 0.639, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5294", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla\n[textbox] Zip -> TYPE: 10019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.243, 0.748, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5295", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.013, 0.671, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5296", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.757, 0.155, 0.765] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5297", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 1.305, 0.296, 1.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5298", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: 90028\n[span] 90028 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.515, 0.459, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5299", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.05, 0.309, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5300", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b34f0027-2cd9-4f3d-9ef6-3590b99cc795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.658, 0.286, 0.697] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5301", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: BWI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.28, 0.359, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5302", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.781, 0.249, 0.794] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5303", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.18, 0.451, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5304", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.32, 0.385, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5305", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.747, 0.485, 0.756] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5306", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_9d7d0da5-57b8-4690-83ca-7ac5bc0523d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.357, 0.353, 0.397] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_5307", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.53, 0.034, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5308", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK\n[span] Plastic -> CLICK\n[button] Show sorting options modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.405, 0.307, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5309", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.082, 0.426, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5310", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a81330b9-cafd-455a-851e-4c12df331c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5311", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.749, 0.085, 0.86, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5312", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.297, 0.133, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5313", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 1.367, 0.452, 1.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5314", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.523, 0.456, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5315", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK\n[link] Comic Coloring Book - Nintendo Switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.962, 0.316, 0.991, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5316", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\n[link] Size Guide -> CLICK\n[link] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.448, 0.557, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5317", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.33, 0.284, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5318", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK\n[link] Cropped Tops -> CLICK\n[generic] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.259, 0.943, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5319", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.451, 0.722, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5320", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.438, 0.73, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5321", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK\n[generic] Bike shop Pigeon Forge -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 1.742, 0.561, 1.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5322", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.179, 0.953, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5323", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.255, 0.84, 0.287] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_5324", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. -> CLICK\n[li] Neighborhood -> CLICK\n[link] Chelsea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.259, 0.206, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5325", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.254, 0.473, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5326", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.187, 0.341, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5327", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK\n[button] Open More Dropdown -> CLICK\n[heading] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.17, 0.693, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5328", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.265, 0.362, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5329", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK\n[button] Annual Pass Internet Order Questions -> CLICK\n[button] Annual Pass Use -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 2.257, 0.95, 2.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5330", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.326, 0.693, 0.348] in the image\nAnd my action is Action: TYPE\nValue: 04/22/2023"}]}, {"id": "mind2web_5331", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2\n[button] Search -> CLICK\n[button] Choose a standard-size taxi costing US$126.84 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 1.173, 0.988, 1.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5332", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.539, 0.795, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5333", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.674, 0.068, 0.685] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5334", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.399, 0.256, 0.64] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5335", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: Brooklyn\n[option] Brooklyn, OH, US Select -> CLICK\n[combobox] Renter Age -> SELECT: 18"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.462, 0.837, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5336", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.139, 0.048, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5337", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.216, 0.852, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5338", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.023, 0.917, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5339", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.953, 0.607, 0.961] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5340", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.414, 0.307, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5341", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.374, 0.084, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5342", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.289, 0.345, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5343", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 1.007, 0.083, 1.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5344", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK\n[link] Dining Sets -> CLICK\n[button] Buying Format -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.83, 0.545, 0.971, 0.562] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5345", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[button] 10, Monday, April 2023. Available. Select as check... -> CLICK\n[button] 12, Wednesday, April 2023. Available. Select as ch... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.115, 0.737, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5346", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK\n[img] Nintendo Switch with Neon Blue and Neon Red Joy-Co... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.251, 0.769, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5347", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.069, 0.713, 0.086] in the image\nAnd my action is Action: TYPE\nValue: street taco"}]}, {"id": "mind2web_5348", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.458, 0.47, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5349", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.14, 0.463, 0.144] in the image\nAnd my action is Action: TYPE\nValue: India"}]}, {"id": "mind2web_5350", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.244, 0.481, 0.277] in the image\nAnd my action is Action: TYPE\nValue: AUSTIN"}]}, {"id": "mind2web_5351", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.208, 0.514, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5352", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: adele\n[option] Adele -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.516, 0.941, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5353", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.45, 0.194, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5354", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.093, 0.326, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5355", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.516, 0.263, 0.527] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5356", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.075, 0.789, 0.089] in the image\nAnd my action is Action: TYPE\nValue: Alagnak"}]}, {"id": "mind2web_5357", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK\n[checkbox] 4+ doors -> CLICK\n[spinbutton] Maximum price -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.893, 0.716, 0.923] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5358", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.252, 0.194, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 45"}]}, {"id": "mind2web_5359", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.415, 0.374, 0.442] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_5360", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.245, 0.637, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5361", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 08"}]}, {"id": "mind2web_5362", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5363", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.315, 0.212, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5364", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK\n[button] Apply -> CLICK\n[button] 10:30 AM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.319, 0.962, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5365", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.144, 0.83, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5366", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[gridcell] Thu Jun 01 2023 -> CLICK\n[gridcell] Fri Jun 30 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.49, 0.78, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5367", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.224, 0.586, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5368", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.219, 0.95, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5369", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.012, 0.1, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5370", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK\n[searchbox] SEARCH BY KEYWORD -> TYPE: Chicago\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.558, 0.977, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5371", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.515, 0.222, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5372", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.299, 0.246, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5373", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. 
-> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.235, 0.716, 0.258] in the image\nAnd my action is Action: TYPE\nValue: La Bergamote"}]}, {"id": "mind2web_5374", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.223, 0.568, 0.241] in the image\nAnd my action is Action: TYPE\nValue: Ann Arbor"}]}, {"id": "mind2web_5375", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 1.146, 0.777, 1.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5376", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.131, 0.287, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5377", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.146, 0.426, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5378", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.297, 0.346, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5379", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.442, 0.969, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5380", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5381", "image": {"bytes": "", "path": "./images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me Ed Sheeran Chords & Tabs\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.037, 0.838, 0.056] in the image\nAnd my action is Action: TYPE\nValue: Ed Sheeran"}]}, {"id": "mind2web_5382", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.197, 0.63, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5383", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.811, 0.056, 0.919, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5384", "image": {"bytes": "", "path": "./images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse events happening at Madison Square Garden.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.306, 0.754, 0.329] in the image\nAnd my action is Action: TYPE\nValue: Madison Square Garden"}]}, {"id": "mind2web_5385", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.0, 0.293, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5386", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.093, 0.782, 0.11] in the image\nAnd my action is Action: TYPE\nValue: Lubbock, Texas"}]}, {"id": "mind2web_5387", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK\n[link] Accessibility -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.296, 0.859, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5388", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.243, 0.603, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5389", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\n[link] Music -> CLICK\n[link] rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.515, 0.135, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5390", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. 
-> TYPE: London\n[a] LON - London, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.388, 0.62, 0.409] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5391", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.196, 0.567, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5392", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.395, 0.805, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5393", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.137, 0.274, 0.463, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5394", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.013, 0.469, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5395", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... 
-> CLICK\n[link] 18 March 2023, Saturday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> TYPE: 2819"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.894, 0.096, 0.934, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5396", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.197, 0.926, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5397", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Most Popular TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.643, 0.673, 0.653] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5398", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living History event to attend in in April .\nPrevious actions:\n[button] Open Menu -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.569, 0.404, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5399", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.39, 0.58, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5400", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.296, 0.699, 0.323] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_5401", "image": {"bytes": "", "path": "./images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for events in Boston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.027, 0.464, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5402", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.374, 0.297, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5403", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.153, 0.4, 0.166] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_5404", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.155, 0.614, 0.17] in the image\nAnd my action is Action: TYPE\nValue: Joe Bloggs"}]}, {"id": "mind2web_5405", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_224ce086-9076-4f11-8961-b3ccb3285081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. 
Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.873, 0.399, 0.941, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5406", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.711, 0.38, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5407", "image": {"bytes": "", "path": "./images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the newest on-demand releases.\nPrevious actions:\n[link] Visit the On Demand page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.116, 0.488, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5408", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK\n[button] Atlanta -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: pizza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.018, 0.659, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5409", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5410", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.359, 0.567, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5411", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.74, 0.678, 0.758] in the image\nAnd my action is Action: TYPE\nValue: Dick"}]}, {"id": "mind2web_5412", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.204, 0.891, 0.234] in the image\nAnd my action is Action: SELECT\nValue: 3 00 AM"}]}, {"id": "mind2web_5413", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK\n[button] DURATION -> CLICK\n[button] 10+ Days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.307, 0.871, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5414", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.161, 0.414, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5415", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.639, 0.121, 0.652] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5416", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.697, 0.509, 0.707] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5417", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_0d993232-3746-4084-95fc-0dd93e7de7f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK\n[link] 2-10 min -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.514, 0.212, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5418", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. 
-> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.323, 0.333, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5419", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.218, 0.716, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5420", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK\n[textbox] Confirmation number (required) -> TYPE: 10000002\n[textbox] Last name (required) -> TYPE: Son"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.403, 0.691, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5421", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.653, 0.802, 0.671] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5422", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com\n[textbox] Phone number -> TYPE: 4533234565"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.834, 0.495, 0.851] in the image\nAnd my action is Action: TYPE\nValue: 234567895"}]}, {"id": "mind2web_5423", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.952, 0.276, 0.994, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5424", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.278, 0.512, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5425", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.102, 0.28, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_5426", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.143, 0.957, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5427", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.425, 0.686, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5428", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.019, 0.917, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5429", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.232, 0.51, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5430", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[link] More -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.188, 0.449, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5431", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.015, 0.83, 0.039] in the image\nAnd my action is Action: TYPE\nValue: oak grove station"}]}, {"id": "mind2web_5432", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: LONDON\n[span] London Paddington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5433", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK\n[span] Shop all at-home COVID-19 tests -> CLICK\n[div] CVS Health At Home COVID-19 Test Kit, 2 CT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.15, 0.969, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5434", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Airfare Included -> CLICK\n[button] All-inclusive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.508, 0.772, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5435", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, OH, US Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.351, 0.84, 0.383] in the image\nAnd my action is Action: SELECT\nValue: 18"}]}, {"id": "mind2web_5436", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.071, 0.403, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5437", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles\n[b] Los Angeles -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.379, 0.556, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5438", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.031, 0.553, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5439", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 3.236, 0.368, 3.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5440", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.198, 0.711, 0.24] in the image\nAnd my action is Action: TYPE\nValue: 11101"}]}, {"id": "mind2web_5441", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.074, 0.664, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5442", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.349, 0.611, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5443", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.085, 0.128, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5444", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.335, 0.943, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5445", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_cba1e975-0bdf-4726-b146-be1142353fe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK\n[link] Insurance -> CLICK\n[textbox] Ask a Question -> TYPE: Health Insurance Top Up plans beneficial or not?"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.18, 0.588, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5446", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5447", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.397, 0.192, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5448", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Caribbean -> CLICK\n[button] SEARCH CRUISES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.091, 0.212, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5449", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.528, 0.426, 0.542] in the image\nAnd my action is Action: TYPE\nValue: Atas"}]}, {"id": "mind2web_5450", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.315, 0.312, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5451", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.156, 0.957, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5452", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.065, 0.129, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5453", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.548, 0.473, 0.566] in the image\nAnd my action is Action: TYPE\nValue: 6000"}]}, {"id": "mind2web_5454", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.322, 0.463, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5455", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_68d2bf36-92a5-4fc5-a7c0-5c1f2fe3cffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.301, 0.191, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_5456", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_92044c3e-58bd-45b2-b161-6eea8af0c53f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york knicks\n[option] New York Knicks -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.244, 0.78, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5457", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[group] RETURN -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.245, 0.448, 0.391, 0.475] in the image\nAnd my action is Action: SELECT\nValue: 5-15"}]}, {"id": "mind2web_5458", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6a1d5aa2-cd89-47b8-83c1-7f435c7c4b3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.182, 0.522, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5459", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.006, 0.561, 0.023] in the image\nAnd my action is Action: TYPE\nValue: rock"}]}, {"id": "mind2web_5460", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.881, 0.14, 0.958, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5461", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.205, 0.749, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5462", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.509, 0.196, 0.529] in the image\nAnd my action is Action: SELECT\nValue: Hardback (13,067)"}]}, {"id": "mind2web_5463", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.326, 0.42, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5464", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.285, 0.783, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5465", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.364, 0.196, 0.386] in the image\nAnd my action is Action: SELECT\nValue: Digital"}]}, {"id": "mind2web_5466", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.515, 0.196, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5467", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.279, 0.259, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Pune"}]}, {"id": "mind2web_5468", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.065, 0.95, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5469", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.613, 0.233, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5470", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.062, 0.282, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5471", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.448, 0.155, 0.53, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5472", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 1.04, 0.297, 1.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5473", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK\n[link] Exotic Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.78, 0.146, 0.793] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5474", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.289, 0.259, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5475", "image": {"bytes": "", "path": "./images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the odds for upcoming NHL matches.\nPrevious actions:\n[span] Odds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.055, 0.758, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5476", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.215, 0.5, 0.248] in the image\nAnd my action is Action: TYPE\nValue: Harry Reid Intl Airport, LAS"}]}, {"id": "mind2web_5477", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK\n[button] Apply Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.715, 0.5, 0.757] in the image\nAnd my action is Action: TYPE\nValue: jacksparrow@gmail.com"}]}, {"id": "mind2web_5478", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.386, 0.539, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5479", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.088, 0.702, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5480", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.404, 0.788, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5481", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\n[searchbox] Search Site -> TYPE: projectors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.009, 0.546, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5482", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[img] Women's Sonoma Goods For Life\u00ae Everyday V-Neck Tee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.58, 0.725, 0.597] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_5483", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.206, 0.947, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5484", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.418, 0.702, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5485", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.371, 0.454, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5486", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.048, 0.248, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5487", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.021, 0.74, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5488", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email -> TYPE: smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.828, 0.678, 0.847] in the image\nAnd my action is Action: SELECT\nValue: Six Flags Magic Mountain / Los Angeles, CA"}]}, {"id": "mind2web_5489", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.237, 0.984, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5490", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.705, 0.31, 0.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5491", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[button] Fast-responding -> CLICK\n[button] Open Now -> CLICK\n[button] Request a Quote -> CLICK\n[radio] Installation or replacement -> CLICK\n[radio] Light fixture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.369, 0.722, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5492", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.341, 0.469, 0.361] in the image\nAnd my action is Action: TYPE\nValue: SFO"}]}, {"id": "mind2web_5493", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.103, 0.203, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5494", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.348, 0.39, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5495", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.201, 0.442, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5496", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.674, 0.218, 0.709, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5497", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_3c33b382-bf0c-4407-8f1d-128cd260334b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK\n[checkbox] Self Park (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.678, 0.452, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5498", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Socks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.291, 0.959, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5499", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK\n[link] See Details -> CLICK\n[span] 11% -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.85, 0.662, 0.855] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5500", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK\n[textbox] Confirmation or ticket number* -> TYPE: 123456\n[textbox] Last name* -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.311, 0.478, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5501", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_e1d11f3d-bddc-40e4-9b38-e2ab641223d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK\n[gridcell] March 26, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.285, 0.831, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5502", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.844, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5503", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.272, 0.585, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5504", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.277, 0.573, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5505", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5506", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_78fab376-fd1d-406e-9818-c9ec36f48546.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. 
The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.324, 0.872, 0.376] in the image\nAnd my action is Action: TYPE\nValue: 24124124091"}]}, {"id": "mind2web_5507", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150\n[textbox] Maximum price filter -> TYPE: 200\n[checkbox] list-filter-item-label-4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.172, 0.748, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5508", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. -> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[button] 5:00 PM Outdoor Table -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.299, 0.523, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5509", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.56, 0.512, 0.578] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5510", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\n[combobox] Search -> TYPE: gloomhaven"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.058, 0.986, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5511", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[option] New York JFK International Airport JFK Jamaica, 11... 
-> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.444, 0.432, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5512", "image": {"bytes": "", "path": "./images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Display details of new lanched iPad pro 11-inch\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.0, 0.291, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5513", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.444, 0.754, 0.482] in the image\nAnd my action is Action: TYPE\nValue: allan.smith@gmail.com"}]}, {"id": "mind2web_5514", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.361, 0.791, 0.389] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_5515", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\n[span] Products -> CLICK\n[span] Premium Subscriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.428, 0.418, 0.572, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5516", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.482, 0.037, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5517", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.097, 0.277, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5518", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Medical -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.324, 0.196, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5519", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.333, 0.359, 0.354] in the image\nAnd my action is Action: SELECT\nValue: AM"}]}, {"id": "mind2web_5520", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.694, 0.923, 0.707] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5521", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK\n[checkbox] Jeep \ue066 -> CLICK\n[checkbox] Toyota \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 1.298, 0.408, 1.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5522", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.179, 0.271, 0.2] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5523", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.588, 0.846, 0.611] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5524", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.021, 0.183, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5525", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.236, 0.937, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5526", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\n[link] subway Subway Lines -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.319, 0.339, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5527", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.68, 0.324, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5528", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_ee3e4de5-183f-4367-aab7-af3f83f6191c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.054, 0.454, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5529", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.466, 0.034, 0.529, 0.038] in the image\nAnd my action is Action: SELECT\nValue: 0"}]}, {"id": "mind2web_5530", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.529, 0.512, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5531", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.36, 0.384, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5532", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.338, 0.295, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5533", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.994, 0.059, 1.012] in the image\nAnd my action is Action: TYPE\nValue: 0"}]}, {"id": "mind2web_5534", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.423, 0.804, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5535", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.358, 0.514, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5536", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[link] More -> HOVER\n[span] Phone Repair -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.021, 0.564, 0.034] in the image\nAnd my action is Action: TYPE\nValue: houston"}]}, {"id": "mind2web_5537", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.111, 0.573, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5538", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.258, 0.661, 0.27] in the image\nAnd my action is Action: TYPE\nValue: Dota 2"}]}, {"id": "mind2web_5539", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.769, 0.192, 0.784] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5540", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.45, 0.239, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5541", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK\n[link] Super Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.758, 0.981, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5542", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.023, 0.16, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5543", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.399, 0.385, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5544", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Canyon de Chelly\n[option] Canyon de Chelly National Monument -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.289, 0.854, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5545", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[gridcell] Thu Jun 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.468, 0.743, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5546", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.023, 0.553, 0.041] in the image\nAnd my action is Action: TYPE\nValue: bath towels"}]}, {"id": "mind2web_5547", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0dc80fc9-d1d3-48b8-abfa-8e7025ea84e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.282, 0.261, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5548", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_62b32b28-721f-4b72-a3e8-a2b1483e66d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK\n[button] Romantic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.377, 0.772, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5549", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.393, 0.473, 0.424] in the image\nAnd my action is Action: TYPE\nValue: 4"}]}, {"id": "mind2web_5550", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.032, 0.45, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5551", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.089, 0.499, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5552", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.009, 0.984, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5553", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_f2be8876-e549-4589-b545-6cfe1baed3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK\n[searchbox] Address, station, landmark -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.179, 0.335, 0.191] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_5554", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.296, 0.249, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5555", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\n[combobox] Select Make -> SELECT: Tesla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.261, 0.599, 0.291] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_5556", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.08, 0.429, 0.121] in the image\nAnd my action is Action: TYPE\nValue: orlando"}]}, {"id": "mind2web_5557", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.279, 0.027, 1.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5558", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: Prometheus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.03, 0.657, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5559", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.172, 0.429, 0.189] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_5560", "image": {"bytes": "", "path": "./images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if there are tickets available for the Hamilton musical in Richmond, VA.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: Hamilton"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.237, 0.408, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5561", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_90eaeb46-910e-4fd2-8657-16cef6654d28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.223, 0.759, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5562", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.58, 0.563, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5563", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK\n[menuitem] Meal Deals -> CLICK\n[menuitem] All Season Dining -> CLICK\n[img] One Meal Season Dining Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.409, 0.781, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5564", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.258, 0.588, 0.297] in the image\nAnd my action is Action: TYPE\nValue: COOPER"}]}, {"id": "mind2web_5565", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.48, 0.77, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5566", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessories.\nPrevious actions:\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.487, 0.938, 1.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5567", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.068, 0.47, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5568", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Swimwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.449, 0.509, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5569", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.36, 0.492, 0.381] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_5570", "image": {"bytes": "", "path": "./images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up Popular Photos in the Community featuring Aegean Airlines\nPrevious actions:\n[span] Community -> CLICK\n[link] Popular Photos -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.365, 0.269, 0.391] in the image\nAnd my action is Action: SELECT\nValue: Aegean Airlines \"Aegean\" (AEE) (452)"}]}, {"id": "mind2web_5571", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.122, 0.93, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5572", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.347, 0.76, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5573", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.003, 0.371, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5574", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.012, 0.05, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5575", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.163, 0.438, 0.177] in the image\nAnd my action is Action: TYPE\nValue: BANGKOK"}]}, {"id": "mind2web_5576", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.332, 0.514, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5577", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.258, 0.86, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5578", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: Lubbock, Texas\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.134, 0.515, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5579", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.484, 0.48, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5580", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.162, 0.491, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5581", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.741, 0.334, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5582", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 1.1, 0.132, 1.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5583", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK\n[generic] Run Search -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.169, 0.478, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5584", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.13, 0.199, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5585", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.113, 0.914, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5586", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.634, 0.481, 0.651] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5587", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.39, 0.037] in the image\nAnd my action is Action: TYPE\nValue: 600w power supply"}]}, {"id": "mind2web_5588", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quiet zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.108, 0.326, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5589", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.207, 0.35, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5590", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.238, 0.58, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5591", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.554, 0.784, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5592", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.379, 0.234, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5593", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.295, 0.13, 0.315] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_5594", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_bdea650c-4e2f-49e6-85c2-22989794fba9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK\n[button] Show 18 Results -> CLICK\n[combobox] Start Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.322, 0.3, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5595", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.07, 0.932, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5596", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 1.775, 0.141, 1.782] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5597", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 1.008, 0.194, 1.015] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5598", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.242, 0.682, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5599", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.141, 0.392, 0.155] in the image\nAnd my action is Action: TYPE\nValue: India"}]}, {"id": "mind2web_5600", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.169, 0.938, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5601", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK\n[link] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.288, 0.181, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5602", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_5ca31bb4-3862-4dba-a5e2-25444ff45cf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.104, 0.206, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5603", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.321, 0.322, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5604", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.421, 0.728, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5605", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.028] in the image\nAnd my action is Action: TYPE\nValue: 44012"}]}, {"id": "mind2web_5606", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.268, 0.358, 0.303] in the image\nAnd my action is Action: SELECT\nValue: Tesla"}]}, {"id": "mind2web_5607", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.279, 0.615, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5608", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK\n[textbox] License Plate -> TYPE: AZXA46\n[combobox] State -> SELECT: AZ"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.476, 0.618, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5609", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK\n[input] -> TYPE: new york\n[link] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.011, 0.867, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5610", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Lebron James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.079, 0.447, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5611", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.217, 0.467, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5612", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK\n[span] Luggage -> CLICK\n[span] Carry-on Luggage -> CLICK\n[img] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.344, 0.988, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5613", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid\n[link] Real Madrid LaLiga -> CLICK\n[link] Fixtures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.348, 0.138, 0.366] in the image\nAnd my action is Action: SELECT\nValue: UEFA Champions League"}]}, {"id": "mind2web_5614", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.0, 0.334, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5615", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.103, 0.513, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5616", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER\n[link] Los Angeles Lakers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.125, 0.158, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5617", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_a68e9d26-9737-4b6b-853d-5f3675ce82d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.244, 0.766, 0.276] in the image\nAnd my action is Action: TYPE\nValue: X123456789"}]}, {"id": "mind2web_5618", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK\n[input] -> CLICK\n[gridcell] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.253, 0.426, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5619", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.172, 0.364, 0.196] in the image\nAnd my action is Action: SELECT\nValue: A6"}]}, {"id": "mind2web_5620", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.712, 0.243, 0.771, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5621", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5622", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.148, 0.631, 0.182] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_5623", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5bd0bd84-5701-40c3-88eb-20c1cf1c37c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.492, 0.409, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5624", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.006, 0.67, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5625", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.424, 0.32, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5626", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Start Journey -> SELECT: Train\n[combobox] End Journey -> SELECT: Bus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.591, 0.848, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5627", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK\n[checkbox] $5.99/Day$1.38/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 1.248, 0.777, 1.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5628", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK\n[textbox] CHECK IN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.142, 0.892, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5629", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.102, 0.493, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5630", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.14, 0.85, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5631", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.162, 0.637, 0.21] in the image\nAnd my action is Action: SELECT\nValue: 9 30 AM"}]}, {"id": "mind2web_5632", "image": {"bytes": "", "path": "./images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an article about a new restaurant and share it on Twitter\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.115, 0.988, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5633", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.253, 0.891, 0.288] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_5634", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[link] Search open jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.307, 0.582, 0.327] in the image\nAnd my action is Action: TYPE\nValue: TX"}]}, {"id": "mind2web_5635", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK\n[span] Buy -> CLICK\n[span] No AppleCare+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 2.2, 0.938, 2.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5636", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5637", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Caribbean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.378, 0.871, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5638", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.025, 0.93, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5639", "image": {"bytes": "", "path": "./images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the news page with the UFC schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.101, 0.679, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5640", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.13, 0.374, 0.143] in the image\nAnd my action is Action: TYPE\nValue: faro"}]}, {"id": "mind2web_5641", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.409, 0.846, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5642", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.293, 0.997, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5643", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.056, 0.137, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5644", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.271, 0.93, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5645", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.196, 0.313, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5646", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.237, 0.249, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5647", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.244, 0.238, 0.262] in the image\nAnd my action is Action: TYPE\nValue: 4847"}]}, {"id": "mind2web_5648", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Minimize my -> SELECT: Walking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.585, 0.848, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5649", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.351, 0.452, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5650", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.004, 0.867, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5651", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.374, 0.233, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5652", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_75d85232-55ad-4c33-8fa6-f604a65b9a08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.389, 0.397, 0.402] in the image\nAnd my action is Action: SELECT\nValue: Sort by Distance"}]}, {"id": "mind2web_5653", "image": {"bytes": "", "path": "./images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to answer a question in the home improvement section.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.001, 0.288, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5654", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.209, 0.97, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5655", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.36, 0.597, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5656", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.106, 0.621, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5657", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.563, 0.543, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5658", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_015ff302-508f-4b98-934e-31687f84b870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\n[link] BUY TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.916, 0.209, 0.926, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5659", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.442, 0.38, 0.482] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_5660", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.669, 0.857, 0.687] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5661", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_7030a4e7-b607-4f7e-92e4-8ca04044bd8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5662", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.015, 0.441, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5663", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.118, 0.861, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5664", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.265, 0.348, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5665", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK\n[link] S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.356, 0.073, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5666", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK\n[button] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.459, 0.216, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5667", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.697, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5668", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.094, 0.749, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5669", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.386, 0.222, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5670", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 1.07, 0.192, 1.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5671", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK\n[button] Apply Filters -> CLICK\n[checkbox] Passenger with Disability or Assistance Needed? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.143, 0.957, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5672", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK\n[combobox] Search by ZIP code, city, or state -> TYPE: 43215\n[span] Columbus, Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.387, 0.302, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5673", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f6fd8a06-3181-48db-a79e-4bae343e1ee1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.16, 0.292, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5674", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.069, 0.713, 0.086] in the image\nAnd my action is Action: TYPE\nValue: busch stadium"}]}, {"id": "mind2web_5675", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.216, 0.277, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5676", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_5606d646-3bbc-46a1-abec-deb28c34e776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach\n[em] Venice -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.286, 0.379, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5677", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.455, 0.629, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5678", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.58, 0.045, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5679", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.206, 0.429, 0.225] in the image\nAnd my action is Action: SELECT\nValue: Lowest mileage first"}]}, {"id": "mind2web_5680", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_cc9cb352-2aea-4969-a19f-e40d05fad832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.617, 0.054, 0.693, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5681", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.101, 0.149, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5682", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.332, 0.359, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5683", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.543, 0.49, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5684", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI\n[option] Airport Chhatrapati Shivaji Maharaj International ... -> CLICK\n[combobox] Flying to -> TYPE: NEW DELHI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.361, 0.792, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5685", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.167, 0.127, 0.278, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5686", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.206, 0.791, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 37"}]}, {"id": "mind2web_5687", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.064, 0.325, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5688", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla\n[textbox] Zip -> TYPE: 10019\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.111, 0.429, 0.122] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_5689", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.536, 0.158, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5690", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_3abb0c61-a81b-4dc7-ac70-d88eb176a529.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK\n[button] Book now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.11, 0.622, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5691", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.283, 0.721, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5692", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.067, 0.129, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5693", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.013, 0.546, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5694", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.16, 0.782, 0.19] in the image\nAnd my action is Action: TYPE\nValue: adele"}]}, {"id": "mind2web_5695", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK\n[link] Adventure -> CLICK\n[generic] TOP RATED -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.804, 0.632, 0.811] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5696", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.222, 0.838, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5697", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.092, 0.181, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5698", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.043, 0.775, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5699", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.489, 0.165, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5700", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.195, 0.641, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5701", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Women -> HOVER\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.192, 0.877, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5702", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.374, 0.391, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5703", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.214, 0.335, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5704", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.156, 0.494, 0.185] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_5705", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.359, 0.705, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5706", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 10013\n[button] Search nearest REI stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 1.368, 0.318, 1.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5707", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 21 -> CLICK\n[button] April 17, 2023. 
-> CLICK\n[combobox] Guests -> SELECT: 2 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.432, 0.119, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5708", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.142, 0.343, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5709", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.196, 0.715, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5710", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles & Hardware -> CLICK\n[link] PlayStation 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.042, 0.378, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5711", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington\n[span] Washington County Regional Apo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.261, 0.905, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5712", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.113, 0.78, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5713", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.915, 0.313, 0.969, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5714", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.408, 0.184, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5715", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.008, 0.183, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5716", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.139, 0.488, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5717", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.217, 0.635, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5718", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.034, 0.788, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5719", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.172, 0.964, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5720", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.027, 0.45, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5721", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Premium -> CLICK\n[label] Distance -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.807, 0.632, 0.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5722", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream\n[p] Neve Campbell, Courteney Cox, David Arquette -> CLICK\n[link] Melissa Barrera -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.575, 0.388, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5723", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_7d20bcbf-3186-404e-bea7-f553986347de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith\n[select] Confirmation Code -> SELECT: Ticket Number"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.367, 0.562, 0.402] in the image\nAnd my action is Action: TYPE\nValue: 123456780"}]}, {"id": "mind2web_5724", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.013, 0.542, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5725", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER\n[span] Colosseum -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.247, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5726", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_b36d2bf4-1700-4742-a245-e46c85973a6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK\n[checkbox] Wine tasting -> CLICK\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.419, 0.141, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5727", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.465, 0.959, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5728", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.052, 0.153, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5729", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[button] Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.013, 0.988, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5730", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.144, 0.06, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5731", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.085, 0.387, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5732", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.351, 0.34, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5733", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.348, 0.908, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5734", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: los angeles kings\n[option] Los Angeles Kings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.154, 0.65, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5735", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.563, 0.501, 0.568] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5736", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.218, 0.947, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5737", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 2.481, 0.308, 2.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5738", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.661, 0.062, 0.675] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5739", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_db4941b4-a391-4fff-911c-fd74052dfd3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.054, 0.753, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5740", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.016, 0.041, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5741", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[div] 1 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.26, 0.927, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5742", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 1.136, 0.792, 1.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5743", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.424, 0.943, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5744", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[button] Add to Wish List -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.925, 0.192, 0.947, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5745", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 2.361, 0.482, 2.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5746", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.091, 0.734, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5747", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... -> CLICK\n[textbox] First line -> TYPE: Hello World"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.79, 0.691, 0.807] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5748", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK\n[textbox] Upper Bound -> TYPE: 40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.896, 0.244, 0.915] in the image\nAnd my action is Action: TYPE\nValue: 0"}]}, {"id": "mind2web_5749", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_f9f59927-eae1-481b-9bbc-c3cca1b2fb0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK\n[button] Less than 7 days -> CLICK\n[span] Voyage Program, European Union Member States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.739, 0.606, 0.772] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5750", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_65dc9fff-ffa8-4c1c-abef-2e06d1af7b05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. 
Miami area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.39, 0.274, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5751", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.249, 0.715, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5752", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.212, 0.359, 0.232] in the image\nAnd my action is Action: TYPE\nValue: broadway"}]}, {"id": "mind2web_5753", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_966ba3c5-09f3-4484-85b9-82df82f9af62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK\n[button] Apply -> CLICK\n[span] From $73 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.314, 0.202, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5754", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Free cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.415, 0.045, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5755", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.056, 0.282, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5756", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Toronto\n[button] Toronto, ON, CA (YYZ) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.306, 0.478, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5757", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.343, 0.334, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5758", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[combobox] Flying to -> TYPE: NEW DELHI\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.41, 0.592, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5759", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.942, 0.222, 0.977, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5760", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.593, 0.938, 1.637] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5761", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.039, 0.666, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5762", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.277, 0.284, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5763", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: pet festival"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.315, 0.398, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5764", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 1.412, 0.296, 1.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5765", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. -> CLICK\n[img] Short Socks (2 Pairs) -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.411, 0.776, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5766", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.401, 0.366, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5767", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.449, 0.94, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5768", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK\n[searchbox] Enter ZIP or State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.273, 0.847, 0.304] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_5769", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.45, 0.328, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5770", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.381, 0.542, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5771", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.527, 0.957, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5772", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_1e146ce2-ae82-47e6-91ed-0f36ecc4b61d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK\n[link] get tickets for Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.049, 0.293, 0.096] in the image\nAnd my action is Action: SELECT\nValue: Change Location..."}]}, {"id": "mind2web_5773", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK\n[button] Adults-Only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.304, 0.387, 0.4, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5774", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.114, 0.266, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5775", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 1.029, 0.045, 1.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5776", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.047, 0.44, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5777", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_0bc81932-85d7-467f-8ef7-294f55118587.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK\n[span] 2 guests -> CLICK\n[combobox] Size -> SELECT: 1 guest"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.691, 0.525, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5778", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.186, 0.966, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5779", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.435, 0.066, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5780", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.897, 0.5, 0.902] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5781", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.019, 0.598, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5782", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 1.173, 0.277, 1.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5783", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.631, 0.787, 0.643] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_5784", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK\n[img] SUV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.148, 0.23, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5785", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.162, 0.923, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5786", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.379, 0.62, 0.399] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_5787", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.21, 0.981, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5788", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.568, 0.688, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5789", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.485, 0.956, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5790", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.531, 0.266, 0.8] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5791", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.099, 0.789, 0.118] in the image\nAnd my action is Action: TYPE\nValue: Canyon de Chelly"}]}, {"id": "mind2web_5792", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK\n[p] Backup Camera -> CLICK\n[combobox] Select Sort Order -> SELECT: Newest first (by car year)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.221, 0.617, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5793", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Itinerary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.343, 0.424, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5794", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.163, 0.432, 0.202] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_5795", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.125, 0.677, 0.146] in the image\nAnd my action is Action: TYPE\nValue: Prometheus"}]}, {"id": "mind2web_5796", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.49, 0.501, 0.518] in the image\nAnd my action is Action: SELECT\nValue: New York"}]}, {"id": "mind2web_5797", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.383, 0.503, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5798", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: TYPE\nValue: las vegas"}]}, {"id": "mind2web_5799", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.197, 0.291, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5800", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.444, 0.837, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5801", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK\n[textbox] Search by keyword -> TYPE: Taylor Swift"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.325, 0.983, 0.35] in the image\nAnd my action is Action: SELECT\nValue: 1 Months Ago"}]}, {"id": "mind2web_5802", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.34, 0.61, 0.37] in the image\nAnd my action is Action: SELECT\nValue: Train"}]}, {"id": "mind2web_5803", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Red Line -> CLICK\n[link] Alerts 8 alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.354, 0.295, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5804", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\n[link] BABY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.107, 0.727, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5805", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.564, 0.128, 0.728, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5806", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.203, 0.652, 0.256] in the image\nAnd my action is Action: TYPE\nValue: Miami, FL"}]}, {"id": "mind2web_5807", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK\n[link] {{ 'see_more_label' | translate }} {{::list.info.n... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.149, 0.048, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5808", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Deathloop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.222, 0.677, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5809", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.25, 0.966, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5810", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.473, 0.263, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5811", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.558, 0.501, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5812", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.121, 0.53, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5813", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.028, 0.553, 0.049] in the image\nAnd my action is Action: TYPE\nValue: mens hiking shoes"}]}, {"id": "mind2web_5814", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.551, 0.24, 0.578] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_5815", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.4, 0.552, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5816", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.07, 0.386, 0.083] in the image\nAnd my action is Action: TYPE\nValue: bananas"}]}, {"id": "mind2web_5817", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 1.032, 0.222, 1.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5818", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.323, 0.492, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5819", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.186, 0.285, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5820", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.185, 0.316, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5821", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.089, 0.453, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5822", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.43, 0.715, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5823", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.321, 0.158, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5824", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.006, 0.686, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5825", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.632, 0.104, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5826", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.301, 0.198, 0.322] in the image\nAnd my action is Action: SELECT\nValue: 2018"}]}, {"id": "mind2web_5827", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] FISHING REELS -> CLICK\n[link] Power Assisted Reels (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.357, 0.074, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5828", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.291, 0.894, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5829", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.145, 0.771, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5830", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.361, 0.969, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5831", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.26, 0.763, 0.278] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_5832", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.266, 0.693, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5833", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.268, 0.891, 0.302] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_5834", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_34656a89-2cb1-4e13-b63b-9d643eece29e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK\n[span] Toll Rates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.535, 0.617, 0.619] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5835", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.9, 0.288, 0.959, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5836", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.393, 0.209, 0.418] in the image\nAnd my action is Action: SELECT\nValue: 9"}]}, {"id": "mind2web_5837", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK\n[link] hip-hop -> CLICK\n[gridcell] Clint Eastwood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.553, 0.639, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5838", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK\n[div] White Water Rafting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.226, 0.963, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5839", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.06, 0.491, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5840", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.678, 0.277, 0.691] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5841", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. 
-> TYPE: doha\n[strong] Doha -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.438, 0.748, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5842", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.1, 0.382, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5843", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.164, 0.704, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5844", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.054, 0.523, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5845", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_b9b3f9b0-6440-4894-b23c-6be659a69df5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.337, 0.384, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5846", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.299, 0.824, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5847", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.613, 0.29, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5848", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK\n[div] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.173, 0.963, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5849", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[button] Search -> CLICK\n[button] List of search results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.045, 1.516, 0.309, 1.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5850", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.282, 0.073, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5851", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.227, 0.892, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5852", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.245, 0.568, 0.267] in the image\nAnd my action is Action: TYPE\nValue: Bloomington, NY"}]}, {"id": "mind2web_5853", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.103, 0.332, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5854", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.027, 0.348, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5855", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.266, 0.664, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5856", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.412, 0.956, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5857", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.025, 0.564, 0.039] in the image\nAnd my action is Action: TYPE\nValue: Concord"}]}, {"id": "mind2web_5858", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.186, 0.177, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5859", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\n[button] Experience -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.14, 0.171, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5860", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.294, 0.984, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5861", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[textbox] Where to? -> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.306, 0.942, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5862", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles\n[span] , CA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.058, 0.661, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5863", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.089, 0.984, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5864", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.334, 0.241, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5865", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.353, 0.312, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5866", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.311, 0.518, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5867", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.296, 0.699, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5868", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City\n[div] New York City, NY -> CLICK\n[textbox] When? -> CLICK\n[li] Spring -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.009, 0.82, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5869", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.926, 0.157, 1.938] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5870", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.055, 0.479, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5871", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.256, 0.325, 0.3] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5872", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[button] 12, Wednesday, April 2023. Available. Select as ch... -> CLICK\n[div] Add guests -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.122, 0.819, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5873", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.176, 0.13, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5874", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.048, 0.263, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5875", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.112, 0.418, 0.146] in the image\nAnd my action is Action: TYPE\nValue: los angeles"}]}, {"id": "mind2web_5876", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK\n[button] Customer Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.456, 0.052, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5877", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.179, 0.147, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5878", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.274, 0.509, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5879", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.098, 0.948, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5880", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK\n[link] Uncharted: Legacy of Thieves Collection - Wiki Bun... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 1.865, 0.244, 1.875] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5881", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK\n[radio] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 1.137, 0.609, 1.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5882", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Casino -> CLICK\n[checkbox] Restaurant -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.764, 0.089, 0.77] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5883", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.011, 0.1, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5884", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.23, 0.481, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5885", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.001, 0.288, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5886", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.0, 0.291, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5887", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.119, 0.892, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5888", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.004, 0.31, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5889", "image": {"bytes": "", "path": "./images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow the Denver Nuggets NBA team.\nPrevious actions:\n[link] NBA . -> HOVER\n[div] Denver -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.459, 0.201, 0.527, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5890", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? 
-> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.352, 0.68, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5891", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.703, 0.226, 0.712] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5892", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.341, 0.143, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5893", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_df3abd68-bd67-4399-a450-33a89f3e7929.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford\n[div] Abbotsford -> CLICK\n[button] Find my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.256, 0.403, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5894", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.063, 0.783, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5895", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_1a91c6ec-4978-47c9-8bf7-8f15d6a78b47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\n[link] Shots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.084, 0.567, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5896", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.266, 0.277, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5897", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.062, 0.719, 0.085] in the image\nAnd my action is Action: TYPE\nValue: detroit"}]}, {"id": "mind2web_5898", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.465, 0.701, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5899", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.84, 0.938, 0.873] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5900", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.258, 0.565, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5901", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.3, 0.553, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5902", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.248, 0.344, 0.276] in the image\nAnd my action is Action: SELECT\nValue: Colorado"}]}, {"id": "mind2web_5903", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK\n[menuitem] Meal Deals -> CLICK\n[menuitem] All Season Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.076, 0.266, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5904", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\n[link] NASCAR Cup Series NASCAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.936, 0.046, 0.978, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5905", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK\n[div] 13 -> CLICK\n[button] Search -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 1.998, 0.815, 2.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5906", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.319, 0.591, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5907", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.453, 0.874, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5908", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.875, 0.679, 0.882] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5909", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.372, 0.834, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5910", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.816, 0.266, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5911", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_51df100c-08cd-426d-839f-fce05efbf3c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.374, 0.567, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5912", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.479, 0.04, 0.509, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5913", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK\n[span] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.186, 0.255, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5914", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.145, 0.712, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5915", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.228, 0.894, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5916", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.197, 0.677, 0.23] in the image\nAnd my action is Action: TYPE\nValue: Guardians of the Galaxy"}]}, {"id": "mind2web_5917", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... -> CLICK\n[link] Add\u00a0to\u00a0Cart -> CLICK\n[checkbox] I agree to Sales & Transaction Policy and Seller ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.414, 0.665, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5918", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER\n[link] Leagues & Cups -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.25, 0.168, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5919", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.057, 0.882, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5920", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.176, 0.571, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5921", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.326, 0.94, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5922", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.424, 0.371, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5923", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.499, 0.326, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5924", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_0495ec9b-5a3c-4d1f-9f95-7384ed92414d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.191, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5925", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_e2b1b2d9-dacd-4da6-9c79-fff51e9fd7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_5926", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_30604de5-f631-477f-96ac-daa281fcef83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.691, 0.493, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5927", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.139, 0.917, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5928", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_b308cce5-d50d-4080-abf6-23523051267b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM\n[combobox] End Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.353, 0.3, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5929", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 5.167, 0.094, 5.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5930", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.124, 0.628, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5931", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Please type your destination -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.301, 0.409, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5932", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK\n[option] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.292, 0.536, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5933", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.093, 0.934, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5934", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks\n[p] Tom Hanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.202, 0.52, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5935", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.075, 0.719, 0.103] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_5936", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.442, 0.341, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5937", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.334, 0.744, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5938", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.663, 0.316, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5939", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[path] -> CLICK\n[button] Ships -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.553, 0.217, 0.654, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5940", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[button] Next -> CLICK\n[button] Next -> CLICK\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.233, 0.905, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5941", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. 
-> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.313, 0.764, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5942", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.1, 0.464, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5943", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK\n[link] History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.647, 0.113, 0.721, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5944", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK\n[link] Explore Climb -> CLICK\n[link] Shop all climbing gear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.291, 0.428, 0.312] in the image\nAnd my action is Action: SELECT\nValue: Price High - Low"}]}, {"id": "mind2web_5945", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.33, 0.234, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5946", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.189, 0.613, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5947", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.068, 0.31, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5948", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.013, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5949", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... 
-> CLICK\n[div] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.65, 0.76, 0.664] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5950", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_d5e4afb2-e893-4df1-a0de-9602b4a381c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.283, 0.671, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Michael Cahill"}]}, {"id": "mind2web_5951", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK\n[button] All dates -> CLICK\n[link] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.389, 0.941, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5952", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.26, 0.929, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5953", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.254, 0.877, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5954", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... 
-> CLICK\n[label] All Regions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.483, 0.269, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5955", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.049, 0.42, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5956", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.169, 0.902, 0.206] in the image\nAnd my action is Action: SELECT\nValue: 2 00 PM"}]}, {"id": "mind2web_5957", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK\n[button] Customer Rating -> CLICK\n[checkbox] & up & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.042, 0.378, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5958", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.179, 0.846, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 11 00 PM"}]}, {"id": "mind2web_5959", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.225, 0.891, 0.26] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_5960", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.355, 0.855, 0.401] in the image\nAnd my action is Action: SELECT\nValue: 4 guests"}]}, {"id": "mind2web_5961", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: McDonalds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.012, 0.62, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5962", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.603, 0.391, 0.614] in the image\nAnd my action is Action: TYPE\nValue: Roy"}]}, {"id": "mind2web_5963", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.212, 0.713, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5964", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.235, 0.623, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5965", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.174, 0.645, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5966", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.125, 0.987, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5967", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.093, 0.326, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5968", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 1.078, 0.609, 1.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5969", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.047, 0.384, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5970", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... 
-> CLICK\n[combobox] Enter a location -> TYPE: north station\n[link] T orange line green line D green line E commuter ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.417, 0.863, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5971", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.033, 0.661, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5972", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.502, 0.236, 0.526] in the image\nAnd my action is Action: TYPE\nValue: black"}]}, {"id": "mind2web_5973", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.319, 0.831, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5974", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.042, 0.036, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5975", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 11231"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.105, 0.948, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5976", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.146, 0.42, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5977", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500\n[combobox] State -> SELECT: New Jersey\n[combobox] Credit Score -> SELECT: Challenged (< 580 FICO\u00ae Score)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.428, 0.459, 0.456] in the image\nAnd my action is Action: SELECT\nValue: 72 months"}]}, {"id": "mind2web_5978", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK\n[link] One-Pieces -> CLICK\n[img] Ribbed 1*1 Long-Sleeve One Piece Outfit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.364, 0.906, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5979", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.183, 0.765, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5980", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.133, 0.866, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5981", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.466, 0.483, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5982", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7c8aca4c-f6b7-4812-a730-34902fbf8b54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.264, 0.41, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5983", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.16, 0.553, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5984", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.391, 0.846, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5985", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER\n[a] Trending -> HOVER\n[link] Beyonce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.184, 0.255, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5986", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 3.678, 0.606, 3.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5987", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.163, 0.188, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5988", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.502, 0.163, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5989", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... 
-> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.425, 0.72, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5990", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.234, 0.535, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5991", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.218, 0.968, 0.235] in the image\nAnd my action is Action: TYPE\nValue: ATLANTA"}]}, {"id": "mind2web_5992", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.049, 0.082, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5993", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.206, 0.474, 0.227] in the image\nAnd my action is Action: TYPE\nValue: Breakneck ridge"}]}, {"id": "mind2web_5994", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. 
Fargo, ND 58102\n[textbox] Shipping Address -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.614, 0.861, 0.631] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_5995", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_c6dbb23e-ae8c-4d6a-94e6-58ea0df2339e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.373, 0.339, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5996", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_16006817-5fb0-425b-80e7-16d8eda37863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.006, 0.393, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5997", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.292, 0.359, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5998", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.003, 0.31, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5999", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_d4376ba0-aefd-4d57-a775-a3cb687627e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.102, 0.145, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6000", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_aef3f78b-c01d-4cad-b931-cf7360857b74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.194, 0.991, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6001", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival\n[textbox] ZIP -> TYPE: 11101"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.728, 0.198, 0.947, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6002", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_006fd607-bc1b-4a63-a6b6-49ce4554b83b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Plan a trip to see fares. 
-> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.361, 0.474, 0.391] in the image\nAnd my action is Action: TYPE\nValue: stoney brook"}]}, {"id": "mind2web_6003", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK\n[link] BGG Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.086, 0.376, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6004", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.216, 0.245, 0.239] in the image\nAnd my action is Action: TYPE\nValue: San Francisco"}]}, {"id": "mind2web_6005", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.3, 0.312, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6006", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.376, 0.803, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6007", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.149, 0.691, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6008", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK\n[link] OPEN 24 Hours -> CLICK\n[link] Default -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.197, 0.683, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6009", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.118, 0.355, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6010", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.43, 0.463, 0.457] in the image\nAnd my action is Action: TYPE\nValue: SMith"}]}, {"id": "mind2web_6011", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\n[link] NBA . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.13, 0.47, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6012", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.077, 0.587, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6013", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK\n[li] Neighborhood -> CLICK\n[link] Midtown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.221, 0.218, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6014", "image": {"bytes": "", "path": "./images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with download options for fantasy apps.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.092, 0.14, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6015", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.521, 0.233, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6016", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.903, 0.284, 0.928] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6017", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 0.378, 0.388, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6018", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.042, 0.139, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6019", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.264, 0.838, 0.287] in the image\nAnd my action is Action: SELECT\nValue: Queen"}]}, {"id": "mind2web_6020", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.336, 0.025, 0.345, 0.033] in the image\nAnd my action is Action: TYPE\nValue: 44240"}]}, {"id": "mind2web_6021", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.456, 0.375, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6022", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.064, 0.672, 0.299, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6023", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.04, 0.727, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6024", "image": {"bytes": "", "path": "./images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my item inventory.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.0, 0.519, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6025", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.44, 0.704, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6026", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.438, 0.415, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6027", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK\n[link] Full Team Statistics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.333, 0.223, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6028", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.332, 0.72, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6029", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK\n[input] -> TYPE: KCCR\n[option] \uf041\u00a0Buchanan Fld (Concord)\u00a0 KCCR CCR \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.079, 0.45, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6030", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.344, 0.944, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6031", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.875, 0.059, 0.893] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6032", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_2dcd2fcc-3dd2-4ca1-bedd-d70892c4f393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK\n[button] Select fare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.35, 0.727, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6033", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK\n[link] Guitar Pro -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.309, 0.97, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6034", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK\n[link] BEDROOM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.38, 0.4, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6035", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK\n[link] BGG Store -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.193, 0.233, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6036", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.283, 0.561, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6037", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.095, 0.191, 0.129] in the image\nAnd my action is Action: TYPE\nValue: 25504"}]}, {"id": "mind2web_6038", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas\n[button] Complete -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.444, 0.664, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6039", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.474, 0.568, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6040", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.36, 0.658, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6041", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.231, 0.528, 0.268] in the image\nAnd my action is Action: TYPE\nValue: mexico city"}]}, {"id": "mind2web_6042", "image": {"bytes": "", "path": "./images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find fitness events for this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.358, 0.939, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6043", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK\n[checkbox] PURPLE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.339, 0.906, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6044", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.283, 0.977, 0.322] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_6045", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.378, 0.808, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6046", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New York leaving on April 9 and returning on April 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.243, 0.641, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6047", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.103, 0.416, 0.121] in the image\nAnd my action is Action: TYPE\nValue: developer"}]}, {"id": "mind2web_6048", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.24, 0.496, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6049", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offer free shipping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400\n[textbox] Maximum Value in $ -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 1.063, 0.176, 1.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6050", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.544, 0.251, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6051", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.528, 0.285, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6052", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagara Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.306, 0.341, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6053", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and are dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[checkbox] Outdoor Seating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.3, 0.529, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6054", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.248, 0.758, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6055", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distance and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.205, 0.406, 0.248] in the image\nAnd my action is Action: TYPE\nValue: car accident lawyers"}]}, {"id": "mind2web_6056", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK\n[generic] Bike shop Pigeon Forge -> CLICK\n[link] See services menu for Rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.888, 0.072, 0.897] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6057", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight from JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.221, 0.84, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6058", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.21, 0.586, 0.254] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6059", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagara Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.218, 0.329, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6060", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.509, 0.339, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6061", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.104, 0.562, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6062", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.036, 0.866, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6063", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.045, 0.249, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6064", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.304, 0.473, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6065", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Start Journey -> SELECT: Train\n[combobox] End Journey -> SELECT: Bus\n[button] Done button - Press enter key to submit travel pre... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.297, 0.359, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6066", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.096, 0.578, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6067", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.061, 0.248, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6068", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.181, 0.892, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6069", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.277, 0.984, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6070", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.456, 0.562, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6071", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.014, 0.369, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6072", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.582, 0.477, 0.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6073", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.375, 0.426, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6074", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.03, 0.426, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6075", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.183, 0.902, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6076", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK\n[checkbox] Honda (549) -> CLICK\n[checkbox] Civic (122) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.219, 0.888, 0.25] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_6077", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.042, 0.749, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6078", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... -> CLICK\n[button] 2:00 PM Eatery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.301, 0.523, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6079", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK\n[textbox] Budget. 
Please enter a numerical value -> TYPE: 1300"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.513, 0.793, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6080", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.029, 0.553, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6081", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.373, 0.278, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6082", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.324, 0.812, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6083", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 4.641, 0.107, 4.651] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6084", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_2c12ecd9-ae60-4175-ba36-c56cd052b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK\n[link] Plan my Trip - Press enter key to submit the form ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.324, 0.332, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6085", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.156, 0.616, 0.17] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_6086", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.033, 0.036, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6087", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.121, 0.331, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6088", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.403, 0.571, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6089", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.333, 0.284, 0.354] in the image\nAnd my action is Action: SELECT\nValue: 45"}]}, {"id": "mind2web_6090", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK\n[link] Super Bowl -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.746, 0.97, 0.826] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6091", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[checkbox] Tortillas (4) -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.661, 0.99, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6092", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_8f5236d2-4814-46b1-8952-9fd67d4d4a13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.23, 0.628, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6093", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.072, 0.837, 0.1] in the image\nAnd my action is Action: TYPE\nValue: Magtag electric dryer"}]}, {"id": "mind2web_6094", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.005, 0.995, 0.021] in the image\nAnd my action is Action: TYPE\nValue: gloomhaven"}]}, {"id": "mind2web_6095", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.431, 0.241, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6096", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.313, 0.693, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6097", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.401, 0.373, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6098", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.247, 0.128, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6099", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.027, 0.524, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6100", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.268, 0.192, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6101", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8dc7145a-abf6-4b53-94fa-c2a1348aab81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.347, 0.557, 0.387] in the image\nAnd my action is Action: TYPE\nValue: smith"}]}, {"id": "mind2web_6102", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.242, 0.393, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6103", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.339, 0.648, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6104", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.129, 0.885, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6105", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.13, 0.713, 0.143] in the image\nAnd my action is Action: TYPE\nValue: VENICE"}]}, {"id": "mind2web_6106", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.086, 0.735, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6107", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK\n[input] -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.077, 0.788, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6108", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.146, 0.341, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6109", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.332, 0.164, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6110", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.476, 0.366, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6111", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.414, 0.396, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6112", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.174, 0.439, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6113", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: MILAN\n[span] Milano (Milan) -> CLICK\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.363, 0.282, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6114", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.366, 0.434, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6115", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.036, 0.551, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6116", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK\n[button] Sorted by: Last 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.474, 0.656, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6117", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith\n[button] SEARCH -> CLICK\n[button] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.099, 0.897, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6118", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.473, 0.925, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6119", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017\n[combobox] Select Maximum Year -> SELECT: 2017\n[div] White -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.28, 0.429, 0.306] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_6120", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_0fd72db4-9850-4454-9f19-30b877e934a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.43, 0.24, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6121", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.159, 0.795, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6122", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.352, 0.193, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6123", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.364, 0.284, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6124", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.26, 0.397, 0.296] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_6125", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.313, 0.318, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6126", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5a4a2016-b8c2-4a54-86c0-e69897f19172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.87, 0.273, 0.918] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6127", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK\n[link] High rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.249, 0.523, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6128", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK\n[div] Popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.844, 0.134, 0.948, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6129", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK\n[link] Find Schedules -> CLICK\n[div] Earlier -> CLICK\n[generic] Press enter key to get details about this schedule -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.489, 0.875, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6130", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\n[link] New & Noteworthy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.16, 0.303, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6131", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.809, 0.178, 0.814] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6132", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_f5a5e597-4007-4c76-b1d0-69beef875a67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. 
-> TYPE: ewn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.242, 0.595, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6133", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.007, 0.675, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6134", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK\n[button] Search -> CLICK\n[link] View details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.492, 0.931, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6135", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.408, 0.326, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6136", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Beyonce -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.272, 0.487, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6137", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[label] Month -> CLICK\n[div] Add guests -> CLICK\n[path] -> CLICK\n[button] increase value -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.122, 0.819, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6138", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_220eebf1-381e-490c-8f48-c96d8228e83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.332, 0.125, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6139", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.421, 0.93, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6140", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.076, 0.653, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6141", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.462, 0.699, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6142", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.464, 0.112, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6143", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.44, 0.876, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6144", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Divorce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.231, 0.559, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6145", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.253, 0.727, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6146", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.365, 0.648, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6147", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK\n[span] Tomatometer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.373, 0.802, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6148", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.262, 0.764, 0.276] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6149", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.956, 0.244, 0.976] in the image\nAnd my action is Action: TYPE\nValue: 40"}]}, {"id": "mind2web_6150", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.562, 0.813, 0.886, 0.857] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6151", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.196, 0.915, 0.216] in the image\nAnd my action is Action: SELECT\nValue: 04 00 PM"}]}, {"id": "mind2web_6152", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.169, 0.964, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6153", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.653, 0.329, 0.663] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6154", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? 
-> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.312, 0.777, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6155", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK\n[option] Last Name -> CLICK\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.274, 0.623, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6156", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\n[link] Categories -> CLICK\n[link] JRPG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 1.212, 0.432, 1.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6157", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.186, 0.469, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6158", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Purple -> CLICK\n[div] Support Level -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.788, 0.194, 0.797] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6159", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.272, 0.34, 0.294] in the image\nAnd my action is Action: TYPE\nValue: 10018"}]}, {"id": "mind2web_6160", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.233, 0.196, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6161", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.244, 0.481, 0.277] in the image\nAnd my action is Action: TYPE\nValue: LONDON"}]}, {"id": "mind2web_6162", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK\n[link] Absolute Beginner 91,138 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.282, 0.523, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6163", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.112, 0.257, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6164", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.146, 0.424, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6165", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.291, 0.462, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6166", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.066, 0.525, 0.083] in the image\nAnd my action is Action: TYPE\nValue: scream"}]}, {"id": "mind2web_6167", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.44, 0.123, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6168", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.262, 0.156, 0.28] in the image\nAnd my action is Action: TYPE\nValue: The Elephant Whisperers"}]}, {"id": "mind2web_6169", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.17, 0.888, 0.195] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_6170", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.122, 0.238, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6171", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK\n[button] Release Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.498, 0.638, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6172", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.365, 0.233, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6173", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.388, 0.571, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6174", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.269, 0.739, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6175", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.328, 0.518, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6176", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.211, 0.89, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6177", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.126, 0.316, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6178", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[link] Restaurants -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.115, 0.329, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6179", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.123, 0.424, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6180", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[svg] -> CLICK\n[div] RawElegant.Life -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.199, 0.556, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6181", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.234, 0.506, 0.274] in the image\nAnd my action is Action: TYPE\nValue: miami"}]}, {"id": "mind2web_6182", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.084, 0.961, 0.114] in the image\nAnd my action is Action: TYPE\nValue: san diego"}]}, {"id": "mind2web_6183", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.139, 0.292, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6184", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK\n[span] Buy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 1.961, 0.938, 1.984] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6185", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250\n[textbox] Down Payment -> TYPE: 3000\n[combobox] State -> SELECT: Tennessee"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.336, 0.459, 0.364] in the image\nAnd my action is Action: SELECT\nValue: Challenged (< 580 FICO\u00ae Score)"}]}, {"id": "mind2web_6186", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)\n[select] All -> SELECT: Spanish (42)\n[select] All -> SELECT: Paperback (39,356)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.399, 0.196, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6187", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[checkbox] Vehicle -> CLICK\n[checkbox] Large appliances -> CLICK\n[radio] Out-of-state -> CLICK\n[button] Virtual Consultations -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.119, 0.612, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6188", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.285, 0.633, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6189", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK\n[span] Price (low to high) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.534, 0.817, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6190", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK\n[link] \ue902 CPU -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.606, 0.74, 0.727, 0.758] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6191", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.358, 0.514, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6192", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10003\n[button] Find care -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.401, 0.448, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6193", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[path] -> CLICK\n[switch] COMPARE -> CLICK\n[button] Add to Compare -> CLICK\n[button] Add to Compare -> CLICK\n[button] Go button to Compare the Selected Car's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.448, 0.17, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6194", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.243, 0.919, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6195", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.337, 0.236, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6196", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.303, 0.252, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6197", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.555, 0.547, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6198", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK\n[link] Book Now\ue90b -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.284, 0.354, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6199", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK\n[link] Atlantis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.345, 0.509, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6200", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\n[combobox] Search query -> TYPE: las vegas raiders"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.141, 0.259, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6201", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.657, 0.165, 0.662] in the image\nAnd my action is Action: TYPE\nValue: caldwell"}]}, {"id": "mind2web_6202", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK\n[radio] Owned -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.765, 0.111, 0.805] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6203", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.466, 0.846, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6204", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.279, 0.788, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6205", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.345, 0.382, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6206", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.478, 0.171, 0.586, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6207", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.04, 0.917, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6208", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.289, 0.206, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6209", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.251, 0.173, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6210", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.274, 0.294, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6211", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.023, 0.268, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6212", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 1.272, 0.194, 1.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6213", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[tab] Review score -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.621, 0.081, 0.628] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6214", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.077, 0.777, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6215", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.583, 0.01, 0.644, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6216", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.049, 0.294, 0.096] in the image\nAnd my action is Action: SELECT\nValue: AMC Grove City 14"}]}, {"id": "mind2web_6217", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Pickup -> CLICK\n[button] Pick up Plastic Eggs in Egg Nesting Easter Eggs Mi... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.425, 0.988, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6218", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_6c0e6305-bd44-4bc3-a988-f15e494a4983.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.376, 0.368, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6219", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.396, 0.71, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6220", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.138, 0.494, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6221", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.799, 0.291, 0.837] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6222", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.305, 0.633, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6223", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.018] in the image\nAnd my action is Action: TYPE\nValue: Mark Knight"}]}, {"id": "mind2web_6224", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.293, 0.161, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6225", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.229, 0.553, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6226", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.287, 0.045, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6227", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.455, 0.693, 0.476] in the image\nAnd my action is Action: TYPE\nValue: 04/21/2023"}]}, {"id": "mind2web_6228", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.069, 0.441, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6229", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.312, 0.226, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6230", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.351, 0.271, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6231", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.16, 0.185, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6232", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.24, 0.613, 0.285] in the image\nAnd my action is Action: TYPE\nValue: Dirty"}]}, {"id": "mind2web_6233", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.216, 0.709, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6234", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK\n[combobox] How many guests? -> SELECT: 4 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.107, 0.964, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6235", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.195, 0.395, 0.22] in the image\nAnd my action is Action: TYPE\nValue: Oyster Bay"}]}, {"id": "mind2web_6236", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.33, 0.249, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6237", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... 
-> CLICK\n[link] 21 April 2023, Friday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.386, 0.858, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6238", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.404, 0.393, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6239", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.457, 0.858, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6240", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK\n[gridcell] Size -> CLICK\n[label] 11-12Y(150) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.373, 0.473, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6241", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 0.071, 0.9, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6242", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_85be9dc1-3b83-4792-8d50-19e0cb3540ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK\n[textbox] Your Name -> TYPE: Michael Cahill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.336, 0.671, 0.37] in the image\nAnd my action is Action: TYPE\nValue: cahillm@gmail.com"}]}, {"id": "mind2web_6243", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK\n[textbox] Search Amazon -> TYPE: laundry detergent\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.053, 0.866, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6244", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.536, 0.333, 0.812, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6245", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests\n[link] Este -> CLICK\n[button] 12:00 PM Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.324, 0.523, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6246", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.227, 0.605, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6247", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.363, 0.328, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6248", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.252, 0.133, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_6249", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.458, 0.389, 0.474] in the image\nAnd my action is Action: SELECT\nValue: 5"}]}, {"id": "mind2web_6250", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK\n[span] Type your medication name -> TYPE: Metformin 1000mg\n[li] metformin 1000mg tablet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.138, 0.378, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6251", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.144, 0.914, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6252", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK\n[span] Shop all at-home COVID-19 tests -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.702, 0.377, 0.72] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6253", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_c359dc6b-ba98-46b8-873c-82c429ee952c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. 
Louis Cardinals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.345, 0.384, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6254", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_f9d90439-ae28-4177-8b1a-a13aba005f33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.287, 0.389, 0.319] in the image\nAnd my action is Action: TYPE\nValue: Sheboygan"}]}, {"id": "mind2web_6255", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65e952d7-d927-49cd-921c-69fdba1e5f71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd\n[textbox] Email Address -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.725, 0.135, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6256", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.482, 0.634, 0.559] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6257", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.375, 0.352, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6258", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER\n[link] Gaming Desktops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.423, 0.158, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6259", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.824, 0.306, 0.831] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6260", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.545, 0.285, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6261", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.09, 0.047, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6262", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.142, 0.196, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6263", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\n[link] NASCAR Cup Series NASCAR -> CLICK\n[button] Open More Dropdown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.096, 0.978, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6264", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.021, 0.598, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6265", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.642, 0.944, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6266", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.278, 0.748, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6267", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.367, 0.393, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6268", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.284, 0.339, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6269", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.254, 0.757, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6270", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_389ba481-1a87-4e37-bd09-b2c6934e5bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6271", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 1.145, 0.156, 1.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6272", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_580a3f91-4259-4821-8318-d3a02646e2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.793, 0.963, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6273", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.475, 0.354, 0.499] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6274", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.09, 0.459, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6275", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.527, 0.837, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6276", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_a20f898d-4a59-40a7-8710-5fbb4914d1fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.194, 0.991, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6277", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.7, 0.101, 0.705] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6278", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.309, 0.777, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6279", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.835, 0.263, 0.868] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_6280", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.503, 0.835, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6281", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.392, 0.361, 0.436] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_6282", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.088, 0.128, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6283", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_1f80f5d5-546c-4959-bdc5-865a0879eadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.361, 0.727, 0.389] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_6284", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.093, 0.36, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6285", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.241, 0.277, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6286", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_af60907a-d2d1-4c07-9b1c-fbc64cd98f23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.924, 0.214, 0.936, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6287", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[button] Condition -> CLICK\n[checkbox] Pre-Owned Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.042, 0.378, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6288", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.523, 0.959, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6289", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 1.256, 0.28, 1.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6290", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.093, 0.471, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6291", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_41b402fe-382e-48b8-802f-92c009ea16d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. 
Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.201, 0.113, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6292", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK\n[img] Nintendo Switch with Neon Blue and Neon Red Joy-Co... -> CLICK\n[button] pickup - unselected - 1 of 3 - Ready within 2 hour... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.363, 0.97, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6293", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b072a168-e520-4371-8fd7-bec2c6b65157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[textbox] Select Event Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.278, 0.404, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6294", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.167, 0.104, 0.278, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6295", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK\n[button] FIND TRAINS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.274, 0.93, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6296", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.233, 0.469, 0.256] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_6297", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER\n[link] Grocery -> CLICK\n[link] Bread -> CLICK\n[label] In Stock Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.365, 0.078, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6298", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.159, 0.74, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6299", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK\n[button] Sorted by: Last 7 days -> CLICK\n[link] Last 30 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.471, 0.104, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6300", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.25, 0.188, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6301", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.329, 0.567, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6302", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 20 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.426, 0.875, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6303", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_bf59aa95-f3c4-46cf-a6d8-9dd2cc9b7d93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.334, 0.403, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6304", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.377, 0.35, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6305", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK\n[textbox] Search for parking -> TYPE: Atlanta International Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.258, 0.86, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6306", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.174, 0.488, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6307", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.438, 0.512, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6308", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.295, 0.446, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6309", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.136, 0.652, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6310", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK\n[checkbox] list-filter-item-label-3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 1.086, 0.089, 1.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6311", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK\n[button] Notify for Dinner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.545, 0.607, 0.594] in the image\nAnd my action is Action: SELECT\nValue: 7 00 PM"}]}, {"id": "mind2web_6312", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 1.511, 0.139, 1.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6313", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.047, 0.263, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6314", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK\n[button] SET AS MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.166, 0.291, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6315", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.38, 0.858, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6316", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.439, 0.235, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6317", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_b87396c2-e500-49d9-b71a-b664bc30e50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.005, 0.2, 0.128, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6318", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK\n[link] Unique Experiences -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.403, 0.107, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6319", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. 
-> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.243, 0.158, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6320", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00\n[combobox] AM or PM -> SELECT: PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.56, 0.353, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6321", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.362, 0.568, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6322", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.35, 0.48, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6323", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.0, 0.557, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6324", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_c8c86f76-7509-46a1-bb49-dc9b4b8a0664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. 
Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.301, 0.442, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6325", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.141, 0.601, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6326", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.417, 0.376, 0.449] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_6327", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK\n[link] Add to wishlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.081, 0.716, 0.103] in the image\nAnd my action is Action: TYPE\nValue: Must buy"}]}, {"id": "mind2web_6328", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.266, 0.461, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6329", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.029, 0.553, 0.052] in the image\nAnd my action is Action: TYPE\nValue: black stroller"}]}, {"id": "mind2web_6330", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.026, 0.977, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6331", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.351, 0.29, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6332", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK\n[link] Auction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.095, 0.792, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6333", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.897, 0.465, 0.948] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6334", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.39, 0.895, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6335", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_d70dfa91-1ad9-4da1-acd8-cc6f2f92b944.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.172, 0.352, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6336", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.229, 0.249, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6337", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK\n[button] Apply Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 0.206, 0.192, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6338", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.214, 0.891, 0.247] in the image\nAnd my action is Action: SELECT\nValue: 3 00 PM"}]}, {"id": "mind2web_6339", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.559, 0.636, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6340", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.009, 0.613, 0.033] in the image\nAnd my action is Action: TYPE\nValue: Star Wars"}]}, {"id": "mind2web_6341", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.246, 0.571, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6342", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\n[link] Massively Multiplayer -> CLICK\n[generic] VR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.782, 0.536, 0.797] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6343", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.029, 0.164, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6344", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: SHANGHAI\n[div] Shanghai, China -> CLICK\n[textbox] Where to? -> TYPE: SEOUL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.493, 0.656, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6345", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_077c4e0c-a79b-448b-8bfe-0973673ac73a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.054, 0.869, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6346", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.301, 0.459, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6347", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.014, 0.39, 0.044] in the image\nAnd my action is Action: TYPE\nValue: lenovo laptop"}]}, {"id": "mind2web_6348", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.363, 0.352, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6349", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.443, 0.263, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6350", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.388, 0.365, 0.409] in the image\nAnd my action is Action: TYPE\nValue: 6944"}]}, {"id": "mind2web_6351", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\n[link] Stats -> CLICK\n[link] Field Goals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.351, 0.124, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6352", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_8a76a674-3387-477b-95a5-02919a9dd32d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.174, 0.734, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6353", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.238, 0.278, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6354", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.042, 0.652, 0.075] in the image\nAnd my action is Action: TYPE\nValue: mens timberland boots"}]}, {"id": "mind2web_6355", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK\n[link] Harry Potter and the Cursed Child - Parts I & II -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.294, 0.973, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6356", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.012, 0.804, 0.03] in the image\nAnd my action is Action: TYPE\nValue: benadryl"}]}, {"id": "mind2web_6357", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_6d1f7b66-139a-4ec9-ad57-c5a574f6988f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.346, 0.866, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6358", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.229, 0.561, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6359", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[label] Distance -> CLICK\n[svg] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.426, 0.916, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6360", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.042, 0.728, 0.076] in the image\nAnd my action is Action: TYPE\nValue: phoenix suns"}]}, {"id": "mind2web_6361", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK\n[link] Number of Seats (High to Low) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.244, 0.918, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6362", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.1, 0.523, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6363", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.429, 0.194, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6364", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.093, 0.411, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6365", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.904, 0.263, 0.911] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6366", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.49, 0.394, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6367", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.299, 0.219, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6368", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK\n[link] External Solid State Drives -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.899, 0.101, 0.929, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6369", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER\n[span] Dry Cleaning -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.019, 0.564, 0.03] in the image\nAnd my action is Action: TYPE\nValue: new york city"}]}, {"id": "mind2web_6370", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 1.118, 0.102, 1.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6371", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_ca9851d3-60f2-424e-9945-db5862f53d2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK\n[textbox] Order number EXAMPLES: ECEA12345, 01234567 -> TYPE: 456481897"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.241, 0.908, 0.27] in the image\nAnd my action is Action: TYPE\nValue: 898-448-6474"}]}, {"id": "mind2web_6372", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.443, 0.478, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6373", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000\n[input] -> TYPE: 5000\n[input] -> TYPE: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.455, 0.265, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6374", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.291, 0.423, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6375", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.254, 0.084, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6376", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.426, 0.13, 0.454] in the image\nAnd my action is Action: SELECT\nValue: 2010"}]}, {"id": "mind2web_6377", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.591, 0.429, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6378", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.55, 0.757, 0.574] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6379", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.187, 0.524, 0.21] in the image\nAnd my action is Action: TYPE\nValue: South Station"}]}, {"id": "mind2web_6380", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.032, 0.564, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6381", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.062, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6382", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK\n[button] Find -> CLICK\n[link] Beauty & Youth Village Spa -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.006, 0.867, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6383", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 2.362, 0.24, 2.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6384", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.533, 0.154, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6385", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK\n[link] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.252, 0.265, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 23"}]}, {"id": "mind2web_6386", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.543, 0.492, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6387", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.014, 0.441, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6388", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.095, 0.301, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6389", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000\n[textbox] Email Address -> TYPE: RA@gmail.com\n[textbox] Zip Code -> TYPE: 90001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.928, 0.401, 0.951] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6390", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.711, 0.487, 0.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6391", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.413, 0.941, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6392", "image": {"bytes": "", "path": "./images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the scores of today's NBA games.\nPrevious actions:\n[span] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.056, 0.611, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6393", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.267, 0.795, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6394", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.148, 0.888, 0.174] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_6395", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.293, 0.831, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6396", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.415, 0.639, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6397", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.258, 0.027, 1.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6398", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.115, 0.41, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6399", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 1.678, 0.404, 1.693] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6400", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.385, 0.647, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6401", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 1.263, 0.182, 1.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6402", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.306, 0.341, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6403", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.918, 0.051, 0.928] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6404", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.003, 0.204, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6405", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.203, 0.652, 0.256] in the image\nAnd my action is Action: TYPE\nValue: changi"}]}, {"id": "mind2web_6406", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.129, 0.83, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6407", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.227, 0.474, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6408", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.173, 0.75, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6409", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.282, 0.843, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6410", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.266, 0.664, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6411", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 2.882, 0.906, 2.91] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6412", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK\n[button] Menu -> CLICK\n[tab] Dinner Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 1.061, 0.452, 1.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6413", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 1.782, 0.277, 1.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6414", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK\n[textbox] Passenger last name , required. -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.427, 0.365, 0.448] in the image\nAnd my action is Action: TYPE\nValue: X899987799"}]}, {"id": "mind2web_6415", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.044, 0.402, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6416", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_3785a3c5-0358-4871-bc29-918d4d0b6fc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.09, 0.959, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6417", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.292, 0.089, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6418", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.038, 0.405, 0.05] in the image\nAnd my action is Action: TYPE\nValue: Lebron James"}]}, {"id": "mind2web_6419", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.298, 0.83, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6420", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK\n[textbox] Please enter City, State, or Zip Code -> TYPE: SPRING, TX\n[div] Spring, TX, US -> CLICK\n[link] Click to submit search form -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.234, 0.147, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6421", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.188, 0.844, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6422", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.331, 0.327, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6423", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[b] Columbus -> TYPE: NEW YORK\n[span] -> CLICK\n[svg] -> CLICK\n[span] Where to? 
-> TYPE: TOKYO\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.164, 0.298, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6424", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_438596ce-4f48-4b91-987f-08aae356b4ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.301, 0.191, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_6425", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.632, 0.125, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6426", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK\n[div] Most Popular (this week) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.345, 0.436, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6427", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.491, 0.486, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6428", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.319, 0.326, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6429", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.3, 0.156, 0.318] in the image\nAnd my action is Action: TYPE\nValue: Jerry Trainor"}]}, {"id": "mind2web_6430", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.756, 0.223, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6431", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.465, 0.648, 0.495] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_6432", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.175, 0.463, 0.18] in the image\nAnd my action is Action: TYPE\nValue: Montana"}]}, {"id": "mind2web_6433", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.157, 0.287, 0.192] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_6434", "image": {"bytes": "", "path": "./images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a coffee shop with wi-fi.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.032, 0.232, 0.051] in the image\nAnd my action is Action: TYPE\nValue: coffee shop"}]}, {"id": "mind2web_6435", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.076, 0.648, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6436", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.359, 0.488, 0.389] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_6437", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.1, 0.463, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6438", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.297, 0.459, 0.325] in the image\nAnd my action is Action: SELECT\nValue: Michigan"}]}, {"id": "mind2web_6439", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK\n[button] Add to Cart -> CLICK\n[button] No Thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.459, 0.605, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6440", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.176, 0.438, 0.195] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_6441", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK\n[textbox] CHECK OUT -> CLICK\n[link] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.121, 0.771, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6442", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.19, 0.441, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6443", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.0, 0.465, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6444", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_c1a76ce4-fd5d-4879-8605-31cbeee8f12a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6445", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.397, 0.335, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6446", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.312, 0.899, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6447", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.477, 0.471, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6448", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Subway -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.281, 0.339, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6449", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.018, 0.981, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6450", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? -> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.454, 0.636, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6451", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.261, 0.473, 0.286] in the image\nAnd my action is Action: TYPE\nValue: 10000"}]}, {"id": "mind2web_6452", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: Ballet\n[button] Search -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.241, 0.871, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6453", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[button] Cabin -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 1.146, 0.523, 1.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6454", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019\n[select] All -> SELECT: UFC\n[link] UFC Fight Night: Edgar vs. The Korean Zombie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.647, 0.222, 0.655] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6455", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\n[button] Help -> CLICK\n[link] Guide to BGG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.513, 0.28, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6456", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.244, 0.833, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6457", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.03, 0.352, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6458", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.304, 0.675, 0.325] in the image\nAnd my action is Action: TYPE\nValue: New"}]}, {"id": "mind2web_6459", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.44, 0.541, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6460", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_98ae2435-3f48-4ed5-a069-ca2cd6f44cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)\n[select] All -> SELECT: Under US$20\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.561, 0.366, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6461", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.089, 0.866, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6462", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 1.779, 0.145, 1.79] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6463", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\n[searchbox] Search... -> TYPE: Star Wars The Mandalorian statue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.067, 0.562, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6464", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.239, 0.359, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6465", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100\n[textbox] Enter your pick-up location or zip code -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.227, 0.349, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6466", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK\n[button] SEARCH BY JOB TITLE OR KEYWORD -> CLICK\n[link] Accounts Payable Associate -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.178, 0.888, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6467", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.333, 0.13, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6468", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.29, 0.739, 0.306] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_6469", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.441, 0.373, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6470", "image": {"bytes": "", "path": "./images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get me info about planning a wedding cruise\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 2.094, 0.216, 2.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6471", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.315, 0.266, 0.349] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_6472", "image": {"bytes": "", "path": "./images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Edit my movie watchlist.\nPrevious actions:\n[button] Watchlist5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.242, 0.559, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6473", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.395, 0.092, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6474", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.207, 0.35, 0.243] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles, CA"}]}, {"id": "mind2web_6475", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.101, 0.104, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6476", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. 
-> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.543, 0.693, 0.564] in the image\nAnd my action is Action: TYPE\nValue: 04/23/2023"}]}, {"id": "mind2web_6477", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.37, 0.704, 0.396] in the image\nAnd my action is Action: SELECT\nValue: OH"}]}, {"id": "mind2web_6478", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b82baee9-9e68-4f5e-bf8d-287b72418176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK\n[span] Search all Parks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.239, 0.864, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6479", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.016, 0.335, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6480", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[switch] COMPARE -> CLICK\n[path] -> CLICK\n[button] Add to Compare -> CLICK\n[button] Go button to Compare the Selected Car's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.202, 0.583, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6481", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.118, 0.999, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6482", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: adele"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.241, 0.483, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6483", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.388, 0.571, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6484", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK\n[button] Today, Tue Apr 11 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 0.331, 0.798, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6485", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.371, 0.488, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6486", "image": {"bytes": "", "path": "./images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up the scores for the previous day's NBA games\nPrevious actions:\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.139, 0.312, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6487", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.346, 0.195, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6488", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK\n[combobox] 2 tickets for Thursday, May 4th | American Express... -> SELECT: 3 Tickets\n[button] Book Now a ticket for Centurion\u00ae Member Access to ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.311, 0.523, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6489", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.563, 0.196, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6490", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.096, 0.857, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6491", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.178, 0.554, 0.199] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_6492", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 2.772, 0.037, 2.799] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6493", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Paris\n[b] Paris -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.304, 0.777, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6494", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.579, 0.264, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6495", "image": {"bytes": "", "path": "./images/bbfed209-df70-434a-aece-5c5fc7a38f4f_d7d7ba00-17c0-4836-a944-04a73a6eeeff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the push notification settings\nPrevious actions:\n[svg] -> CLICK\n[link] Your account settings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.371, 0.198, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6496", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4cfe3658-8ba0-4c53-b71a-15ef5a8820c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.225, 0.891, 0.26] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_6497", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.291, 0.446, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6498", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.247, 0.825, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6499", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.701, 0.078, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6500", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.513, 0.221, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6501", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_08e81213-3c6e-46da-9b79-9286f704685d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.907, 0.037, 0.934, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6502", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.031, 0.958, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6503", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.094, 0.327, 0.116] in the image\nAnd my action is Action: TYPE\nValue: LONDON"}]}, {"id": "mind2web_6504", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.524, 0.421, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6505", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.412, 0.218, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6506", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 1.123, 0.45, 1.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6507", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. 
-> CLICK\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.428, 0.367, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6508", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.042, 0.673, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6509", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK\n[button] Seattle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.016, 0.423, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6510", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.13, 0.692, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6511", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.056, 0.266, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6512", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.081, 0.259, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6513", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $14.45/Day$13.95/Day -> CLICK\n[checkbox] $12.99/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 1.406, 0.93, 1.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6514", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.134, 0.552, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6515", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] Postal code, address, store name -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.156, 0.727, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6516", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 1.156, 0.846, 1.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6517", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.162, 0.587, 0.201] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_6518", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.386, 0.666, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6519", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.004, 0.553, 0.008] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6520", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. 
-> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.32, 0.754, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6521", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.456, 0.464, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6522", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\n[button] Deals -> CLICK\n[link] Partner Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.743, 0.618, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6523", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM\n[button] Select My Car -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.278, 0.882, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6524", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[link] Amazon Basics 7-Piece Lightweight Microfiber Bed-i... -> CLICK\n[button] Red Buffalo Plaid $57.81 In Stock. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.811, 0.31, 0.97, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6525", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. 
Miami area -> CLICK\n[button] Explore flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.448, 0.345, 0.552, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6526", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.413, 0.692, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6527", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK\n[link] Stairway To Heaven -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.101, 0.852, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6528", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.824, 0.468, 0.855] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6529", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... -> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.281, 0.546, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6530", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.023, 0.553, 0.041] in the image\nAnd my action is Action: TYPE\nValue: diamond stud earrings"}]}, {"id": "mind2web_6531", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.085, 0.582, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6532", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK\n[checkbox] Seat choice included -> CLICK\n[checkbox] No cancel fee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.287, 0.825, 0.319] in the image\nAnd my action is Action: SELECT\nValue: Price (Lowest)"}]}, {"id": "mind2web_6533", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.42, 0.788, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6534", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK\n[select] All -> SELECT: Ages 3-5 (31)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.445, 0.196, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6535", "image": {"bytes": "", "path": "./images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of HomePod mini\nPrevious actions:\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.743, 0.226, 0.809] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6536", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.49, 0.43, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6537", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.223, 0.349, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6538", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] icon of Build Your Own Custom PC Build Your Own Cu... 
-> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.492, 0.868, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6539", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.739, 0.309, 0.758] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6540", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.0, 0.493, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6541", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_086ab8cc-7f5e-43b0-9b03-ba376d208f4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.095, 0.621, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6542", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.243, 0.812, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6543", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.113, 0.93, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6544", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.376, 0.419, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6545", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.013, 0.863, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6546", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.35, 0.126, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6547", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.353, 0.912, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6548", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Airfare Included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.338, 0.568, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6549", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.408, 0.12, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6550", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.163, 0.928, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6551", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.737, 0.153, 0.766] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6552", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.368, 0.966, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6553", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK\n[button] Find -> CLICK\n[span] Coupons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.148, 0.23, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6554", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.286, 0.845, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6555", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK\n[button] Save to favorites, KUDDARNA, Chair pad, outdoor -> CLICK\n[button] Save to favorites, GULLBERGS\u00d6, Cushion cover, in/o... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.537, 0.609, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6556", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.126, 0.441, 0.139] in the image\nAnd my action is Action: TYPE\nValue: Elevated Escape"}]}, {"id": "mind2web_6557", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\n[button] Prescriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.333, 0.397, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6558", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_5fe867ec-cc98-4ef2-85ce-691bb9dadb48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK\n[button] Connections \uf107 -> CLICK\n[button] Fares \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.648, 0.416, 0.868, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6559", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.558, 0.351, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6560", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.042, 0.807, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6561", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.047, 0.263, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6562", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.201, 0.984, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6563", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_0add092e-4553-40ee-8190-317048d85eb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.26, 0.562, 0.294] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_6564", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[button] 10, Monday, April 2023. Available. Select as check... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.393, 0.359, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6565", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.258, 0.443, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6566", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.331, 0.463, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6567", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: xbox 360 games\n[searchbox] Search games, consoles & more -> ENTER\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.112, 0.261, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6568", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 1.189, 0.225, 1.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6569", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.217, 0.284, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6570", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.539, 0.233, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6571", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.548, 0.906, 0.583] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6572", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.022, 0.564, 0.035] in the image\nAnd my action is Action: TYPE\nValue: WESTMINSTER"}]}, {"id": "mind2web_6573", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK\n[button] Go! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.099, 0.714, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6574", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.135, 0.787, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6575", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.518, 0.16, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6576", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.258, 0.839, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6577", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK\n[textbox] Confirmation or ticket number* -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.339, 0.481, 0.377] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_6578", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.23, 0.359, 0.255] in the image\nAnd my action is Action: TYPE\nValue: 52nd street, brooklyn"}]}, {"id": "mind2web_6579", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.106, 0.421, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6580", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.07, 0.81, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6581", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.564, 0.595, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6582", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.205, 0.335, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6583", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK\n[link] Tab -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.331, 0.97, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6584", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.475, 0.478, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6585", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.007, 0.686, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6586", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.005, 0.781, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6587", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... 
-> CLICK\n[checkbox] Deluxe Bedroom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.845, 0.447, 0.875] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6588", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.201, 0.984, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6589", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.373, 0.385, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6590", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.072, 0.793, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6591", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.321, 0.196, 0.342] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_6592", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.256, 0.841, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6593", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.3, 0.466, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6594", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.299, 0.249, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6595", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.226, 0.215, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6596", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.275, 0.509, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6597", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\n[textbox] Where to? -> CLICK\n[button] Nearby -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.436, 0.705, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6598", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.075, 0.287, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6599", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.393, 0.284, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6600", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... -> CLICK\n[link] Add\u00a0to\u00a0Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.372, 0.485, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6601", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.025, 0.243, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6602", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_c33f9822-9241-43c5-99b9-d2f95f871ed9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: 10000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6603", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.468, 0.711, 0.493] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_6604", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_4b083d95-5ecd-4c5e-b53c-f9940aa9134d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.243, 0.205, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6605", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER\n[link] Leagues & Cups -> CLICK\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.21, 0.091, 0.226] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_6606", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 1.09, 0.903, 1.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6607", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.771, 0.214, 0.867, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6608", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.524, 0.671, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6609", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.899, 0.244, 0.969, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6610", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.374, 0.718, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6611", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_39c6496a-5532-4d83-8d37-d129f42d5ea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.202, 0.397, 0.247] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_6612", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.075, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6613", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.411, 0.619, 0.429] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6614", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_049acc7d-e917-45d0-99ed-6c273ee77075.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.24, 0.973, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6615", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[div] -> CLICK\n[div] -> CLICK\n[span] 36 -> CLICK\n[button] 34 -> CLICK\n[button] Confirm Seats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.344, 0.926, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6616", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.039, 0.749, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6617", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.325, 0.153, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6618", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.34, 0.405, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6619", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\n[a] City Pages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.211, 0.712, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6620", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.032, 0.485, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6621", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.551, 0.685, 0.578] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6622", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.521, 0.158, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6623", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.012, 0.592, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6624", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.089, 0.181, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6625", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_ae649734-cc80-41d9-a091-a527d4701cf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.405, 0.922, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6626", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.228, 0.863, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6627", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.086, 0.257, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6628", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK\n[link] Decorative lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.135, 0.41, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6629", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.447, 0.15, 0.553, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6630", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_cd172bd3-2207-4a53-99df-1609f3cf87a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK\n[listbox] Direction -> SELECT: Forward facing\n[listbox] Position -> SELECT: Window\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.185, 0.925, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6631", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.46, 0.777, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6632", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (12) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.639, 0.458, 0.676] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6633", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.304, 0.847, 0.338] in the image\nAnd my action is Action: TYPE\nValue: 08817"}]}, {"id": "mind2web_6634", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_b7d8a0ab-d6fb-4d63-aef6-6a71b72079ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: dog treats"}]}, {"id": "mind2web_6635", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.303, 0.113, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6636", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.258, 0.592, 0.284] in the image\nAnd my action is Action: TYPE\nValue: stewart hotel"}]}, {"id": "mind2web_6637", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.419, 0.464, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6638", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\n[link] Company -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.423, 0.161, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6639", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.187, 0.168, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6640", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[listitem] X5 (87) X5 (87) -> CLICK\n[path] -> CLICK\n[switch] COMPARE -> CLICK\n[button] Add to Compare -> CLICK\n[button] Add to Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.138, 0.899, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6641", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK\n[button] Romantic -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.383, 0.491, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6642", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.07, 0.047, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6643", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.22, 0.609, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6644", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.521, 0.2, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6645", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.207, 0.931, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6646", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art\n[input] -> CLICK\n[link] Wall Art -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.428, 0.127, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6647", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.133, 0.582, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6648", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER\n[a] Trending -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.079, 0.517, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6649", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.112, 0.018, 0.205, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6650", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.104, 0.652, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6651", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.27, 0.894, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6652", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.892, 0.089, 0.897] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6653", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Golden State Warriors\n[link] Golden State Warriors NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.137, 0.093, 0.182, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6654", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.248, 0.259, 0.281] in the image\nAnd my action is Action: TYPE\nValue: MUMBAI"}]}, {"id": "mind2web_6655", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.28, 0.32, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6656", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.243, 0.753, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6657", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.233, 0.253, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6658", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.011, 0.592, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6659", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.239, 0.926, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6660", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.178, 0.668, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6661", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.539, 0.559, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6662", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23\n[textbox] Please cancel my order for the following products ... -> TYPE: Harry Potter Box Set"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.574, 0.759, 0.603] in the image\nAnd my action is Action: TYPE\nValue: Not available at address"}]}, {"id": "mind2web_6663", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.012, 0.789, 0.048] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_6664", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.26, 0.256, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6665", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[gridcell] Fri Jun 30 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.951, 0.105, 0.961] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6666", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.06, 0.181, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6667", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.192, 0.647, 0.214] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_6668", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.15, 0.159, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6669", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.518, 0.944, 0.538] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6670", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.418, 0.12, 0.462] in the image\nAnd my action is Action: SELECT\nValue: 2005"}]}, {"id": "mind2web_6671", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] SOCCER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.106, 0.267, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6672", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.278, 0.331, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6673", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.014, 0.194, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6674", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK\n[link] Gorillaz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.105, 0.832, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6675", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK\n[link] Game Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.474, 0.55, 0.495] in the image\nAnd my action is Action: SELECT\nValue: PC"}]}, {"id": "mind2web_6676", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.225, 0.691, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6677", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.218, 0.853, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6678", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK\n[textbox] min price $ -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.257, 0.739, 0.271] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_6679", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.03, 0.352, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6680", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.001, 0.417, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6681", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.244, 0.815, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6682", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.04] in the image\nAnd my action is Action: TYPE\nValue: Katy Perry"}]}, {"id": "mind2web_6683", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.259, 0.359, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6684", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.429, 0.316, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6685", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.177, 0.691, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6686", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\n[link] CITIES -> CLICK\n[link] NEW YORK CITY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.487, 0.36, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6687", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.16, 0.843, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6688", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.306, 0.195, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6689", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.241, 0.717, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6690", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.093, 0.326, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6691", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[label] Air India -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.694, 0.074, 0.699] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6692", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.012, 0.151, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6693", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.501, 0.95, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6694", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.0, 0.445, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6695", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK\n[link] $25 to $50 (2,237) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.169, 0.986, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6696", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[div] Send a physical card through the mail. -> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.425, 0.99, 0.452] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_6697", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.033, 0.643, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6698", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.342, 0.481, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6699", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.31, 0.163, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6700", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_b8bd887f-f0a3-4977-9bbd-8ab6d095f115.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK\n[textbox] Your Name -> TYPE: Michael Cahill\n[textbox] Your Email -> TYPE: cahillm@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.4, 0.58, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6701", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_9a4627a2-fa39-4c85-b295-a6ebe37f5a95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.273, 0.363, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6702", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.014, 0.577, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6703", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK\n[combobox] Search by ZIP code, city, or state -> TYPE: 43215"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.194, 0.287, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6704", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.513, 0.772, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6705", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.151, 0.73, 0.171] in the image\nAnd my action is Action: TYPE\nValue: New York University"}]}, {"id": "mind2web_6706", "image": {"bytes": "", "path": "./images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the scores of today's NBA games.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.111, 0.592, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6707", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.591, 0.829, 0.598] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_6708", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.36, 0.29, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6709", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.169, 0.41, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6710", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.735, 0.222, 0.746] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6711", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.658, 0.622, 0.675] in the image\nAnd my action is Action: TYPE\nValue: san francisco"}]}, {"id": "mind2web_6712", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.377, 0.299, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6713", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.187, 0.291, 0.212] in the image\nAnd my action is Action: SELECT\nValue: 16"}]}, {"id": "mind2web_6714", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.007, 0.613, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Prometheus"}]}, {"id": "mind2web_6715", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.249, 0.92, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6716", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.37, 0.894, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6717", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[textbox] Near -> TYPE: houston\n[span] Houston -> CLICK\n[button] Fast-responding -> CLICK\n[radio] Data recovery -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.098, 0.612, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6718", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.202, 0.991, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6719", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.087, 0.93, 0.103] in the image\nAnd my action is Action: TYPE\nValue: Dota 2"}]}, {"id": "mind2web_6720", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\n[button] Savings & Memberships -> CLICK\n[link] Weekly Ad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.173, 0.714, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6721", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.134, 0.205, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6722", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_dd2bc1cb-54ad-4246-8005-f7287aa435c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.054, 0.869, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6723", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.015] in the image\nAnd my action is Action: TYPE\nValue: game mix"}]}, {"id": "mind2web_6724", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.764, 0.95, 0.797] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_6725", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.016, 0.981, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6726", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.421, 0.942, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6727", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.73, 0.625, 0.749] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6728", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.312, 0.884, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6729", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.13, 0.713, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6730", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Volunteer -> CLICK\n[link] Become a VIP and Volunteer with Us Today!\u203a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 1.485, 0.416, 1.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6731", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.0, 0.417, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6732", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_86f0129f-5ca2-437f-ace5-ada6f8fda4ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.27, 0.384, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_6733", "image": {"bytes": "", "path": "./images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if there are tickets availabe for the Hamilton musical in Richmond, VA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.171, 0.782, 0.203] in the image\nAnd my action is Action: TYPE\nValue: Hamilton"}]}, {"id": "mind2web_6734", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_dfd559fc-eb25-4cda-9a91-8c60b0bbce36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.297, 0.168, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6735", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6736", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK\n[button] APPLY -> CLICK\n[span] Compare -> CLICK\n[span] Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.906, 0.528, 0.984, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6737", "image": {"bytes": "", "path": "./images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give me the IMDB recommendations of what to watch.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.009, 0.144, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6738", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.179, 0.923, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6739", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.255, 0.192, 0.287] in the image\nAnd my action is Action: SELECT\nValue: 3 Guests"}]}, {"id": "mind2web_6740", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK\n[menuitem] Free to home or store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.329, 0.249, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6741", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.549, 0.218, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6742", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.415, 0.096, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6743", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Price + Shipping: lowest first -> CLICK\n[button] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.461, 0.868, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6744", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.297, 0.407, 0.324] in the image\nAnd my action is Action: SELECT\nValue: 500 mi"}]}, {"id": "mind2web_6745", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.522, 0.233, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6746", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.108, 0.153, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6747", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK\n[checkbox] Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.472, 0.22, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6748", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.069, 0.587, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6749", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.175, 0.824, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6750", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\n[searchbox] Search Site -> TYPE: projectors\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.183, 0.192, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6751", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.173, 0.975, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6752", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.063, 0.652, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6753", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.637, 0.627, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6754", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK\n[link] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.21, 0.465, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6755", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.755, 1.728, 0.822, 1.741] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6756", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.619, 0.721, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6757", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.175, 0.331, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6758", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.349, 0.785, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6759", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.095, 0.129, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6760", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Supported Devices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.242, 0.59, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6761", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: san francisco\n[span] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.703, 0.165, 0.72] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6762", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.085, 0.273, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6763", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK\n[link] F1 -> CLICK\n[span] The Gab & Juls Show -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.407, 0.528, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6764", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.165, 0.573, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6765", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 1.064, 0.263, 1.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6766", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.262, 0.452, 0.276] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_6767", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.059, 0.181, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6768", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York\n[a] NYC - New York, NY -> CLICK\n[combobox] Date -> SELECT: Friday, April 7\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.123, 0.847, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6769", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK\n[link] Bridges & Tunnels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.672, 0.5, 0.681] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6770", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK\n[textbox] City -> TYPE: Grand Junction\n[combobox] State -> SELECT: Colorado"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.402, 0.903, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6771", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.299, 0.551, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6772", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK\n[link] Food & Drink -> CLICK\n[link] Coffee & Tea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.779, 0.121, 0.792] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6773", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_22a19b2e-f799-4a08-bd41-16a246e36019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.214, 0.606, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6774", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.638, 0.274, 0.67] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6775", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.229, 0.464, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6776", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.121, 0.434, 0.139] in the image\nAnd my action is Action: TYPE\nValue: Lave Hot Springs East KOA"}]}, {"id": "mind2web_6777", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.982, 0.807, 1.009] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6778", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.125, 0.391, 0.142] in the image\nAnd my action is Action: TYPE\nValue: 4"}]}, {"id": "mind2web_6779", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.148, 0.953, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6780", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.077, 0.398, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6781", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.1, 0.443, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6782", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.426, 0.974, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6783", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.134, 0.047, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6784", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_356098a8-e05f-4dd3-abf5-7740e225140e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6785", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.146, 0.216, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6786", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen\n[spinbutton] Main item quantity -> TYPE: 2\n[button] ADD TO BAG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.219, 0.736, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6787", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.193, 0.154, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6788", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.389, 0.384, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6789", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. 
Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.34, 0.63, 0.376] in the image\nAnd my action is Action: TYPE\nValue: Jaguar"}]}, {"id": "mind2web_6790", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.631, 0.059, 0.647] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6791", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.453, 0.846, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6792", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_fabde200-2878-4346-ba10-23b269a827fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.255, 0.488, 0.301] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_6793", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK\n[div] Product Category -> CLICK\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 1.275, 0.452, 1.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6794", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.436, 0.969, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6795", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.384, 0.574, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6796", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.843, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6797", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK\n[div] Learn More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.317, 0.341, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6798", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.297, 0.803, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6799", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.032, 0.167, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6800", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.442, 0.731, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6801", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: Santa Fe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.304, 0.359, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6802", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.445, 0.333, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6803", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.293, 0.253, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6804", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival\n[textbox] ZIP -> TYPE: 11101\n[button] Find Dealers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.356, 0.928, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6805", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 0.531, 0.972, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6806", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.26, 0.481, 0.297] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_6807", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.033, 0.906, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6808", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.079, 0.765, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6809", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK\n[link] hip-hop -> CLICK\n[gridcell] Clint Eastwood -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.575, 0.724, 0.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6810", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.149, 0.853, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6811", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.052, 0.668, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6812", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue\n[textbox] * Required Fields House Number -> TYPE: 4847"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.248, 0.476, 0.267] in the image\nAnd my action is Action: TYPE\nValue: 10019"}]}, {"id": "mind2web_6813", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_f1dbd69a-ff2d-4ee1-8fd8-6773599ab87b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.104, 0.109, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6814", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.575, 0.677, 0.584, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6815", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.038, 0.37, 0.05] in the image\nAnd my action is Action: TYPE\nValue: Chicago Bulls"}]}, {"id": "mind2web_6816", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? -> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.352, 0.767, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6817", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_cb45e6f4-73da-49fe-85ca-746a2424c6c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.006, 0.995, 0.025] in the image\nAnd my action is Action: TYPE\nValue: frosthaven"}]}, {"id": "mind2web_6818", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.398, 0.027, 1.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6819", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.22, 0.609, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6820", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. 
Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.382, 0.196, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6821", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.678, 0.253, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6822", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] List Explorer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.089, 0.89, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6823", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.278, 0.405, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6824", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.27, 0.969, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6825", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.229, 0.249, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6826", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 2.451, 0.945, 2.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6827", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.248, 0.969, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6828", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.301, 0.894, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6829", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.306, 0.808, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6830", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. 
Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.673, 0.892, 0.69] in the image\nAnd my action is Action: SELECT\nValue: Two-Story"}]}, {"id": "mind2web_6831", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.33, 0.284, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6832", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_dbbb21ad-4cd5-4166-a7f6-121ce7bf34e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.376, 0.368, 0.419] in the image\nAnd my action is Action: TYPE\nValue: 1234567890123"}]}, {"id": "mind2web_6833", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.351, 0.866, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6834", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.085, 0.342, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6835", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.61, 0.087, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6836", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.25, 0.326, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6837", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.238, 0.249, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6838", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine\n[button] Search for cough medicine -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.693, 0.143, 0.706] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6839", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.082, 0.184, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6840", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.137, 0.837, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6841", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK\n[menuitem] Free to home or store -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.121, 1.002, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6842", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6843", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.027, 0.524, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6844", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.758, 0.164, 0.981, 0.194] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_6845", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.431, 0.686, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6846", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.233, 0.305, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6847", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.517, 0.277, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6848", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.206, 0.388, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6849", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.669, 0.882, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6850", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.231, 0.691, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6851", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_7e00abe3-6fa7-4b74-b2be-7505a7270e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.139, 0.367, 0.238, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6852", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.393, 0.388, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6853", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.184, 0.239, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6854", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.362, 0.908, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6855", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_4ed75775-bdd0-455f-abf7-f105531035b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.372, 0.472, 0.383] in the image\nAnd my action is Action: TYPE\nValue: 26807"}]}, {"id": "mind2web_6856", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.221, 0.573, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6857", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.139, 0.292, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6858", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 1.239, 0.192, 1.254] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_6859", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.897, 0.093, 0.905] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6860", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.885, 0.226, 0.896] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6861", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_72ec56be-d53d-4b32-acb8-ac991e6ca999.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.347, 0.474, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6862", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.376, 0.981, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6863", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.112, 0.041, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6864", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.21, 0.249, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6865", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.188, 0.272, 0.215] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_6866", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.108, 0.161, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6867", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.387, 0.609, 0.421] in the image\nAnd my action is Action: TYPE\nValue: 1234"}]}, {"id": "mind2web_6868", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.582, 0.263, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6869", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.205, 0.42, 0.257] in the image\nAnd my action is Action: TYPE\nValue: Ohio"}]}, {"id": "mind2web_6870", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.498, 0.32, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6871", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.097, 0.625, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6872", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. 
Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee\n[textbox] Recipient Email -> TYPE: scisoorbros@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.477, 0.975, 0.508] in the image\nAnd my action is Action: TYPE\nValue: Jeerimiah Waton"}]}, {"id": "mind2web_6873", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.389, 0.143, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6874", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Medical -> CLICK\n[link] Diseases & Disorders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.387, 0.366, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6875", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] March 19, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.12, 0.304, 0.279, 0.335] in the image\nAnd my action is Action: TYPE\nValue: 3"}]}, {"id": "mind2web_6876", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER\n[span] Colosseum -> CLICK\n[link] Tours & Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.947, 0.143, 0.956] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6877", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.181, 0.077, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6878", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK\n[polygon] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.395, 0.256, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6879", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_47173eeb-574c-43c5-a937-a7da3b445094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: 10000001"}]}, {"id": "mind2web_6880", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.355, 0.292, 0.401] in the image\nAnd my action is Action: SELECT\nValue: Wineries"}]}, {"id": "mind2web_6881", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.379, 0.471, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6882", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.347, 0.205, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6883", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.503, 0.143, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6884", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.079, 0.186, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6885", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches\n[button] Go -> CLICK\n[RootWebArea] Amazon.com : dog bed 30 inches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 1.071, 0.032, 1.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6886", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 1.496, 0.779, 1.534] in the image\nAnd my action is Action: SELECT\nValue: 2 - 5 Days"}]}, {"id": "mind2web_6887", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.044, 0.176, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6888", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.122, 0.819, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6889", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK\n[gridcell] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.536, 0.139, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6890", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.06, 0.491, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6891", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.168, 0.594, 0.188] in the image\nAnd my action is Action: TYPE\nValue: Titanic"}]}, {"id": "mind2web_6892", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.113, 0.897, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6893", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.363, 0.196, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6894", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.108, 0.819, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6895", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.257, 0.459, 0.283] in the image\nAnd my action is Action: TYPE\nValue: 3000"}]}, {"id": "mind2web_6896", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... 
-> CLICK\n[div] Price Alert -> CLICK\n[textbox] price from -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.493, 0.727, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6897", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.209, 0.234, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6898", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.039, 0.722, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6899", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.131, 0.485, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6900", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.363, 0.539, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6901", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.436, 0.066, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6902", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.448, 0.307, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6903", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.219, 0.266, 0.242] in the image\nAnd my action is Action: TYPE\nValue: SPRING, TX"}]}, {"id": "mind2web_6904", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.448, 0.127, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6905", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.019, 1.0, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6906", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.38, 0.875, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6907", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: Mexico"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.153, 0.536, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6908", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.573, 0.138, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6909", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.264, 0.237, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6910", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 2.331, 0.457, 2.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6911", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.224, 0.235, 0.277, 0.263] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_6912", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.218, 0.459, 0.244] in the image\nAnd my action is Action: TYPE\nValue: 15000"}]}, {"id": "mind2web_6913", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_0a60f460-55f6-4c2d-a535-36cdf07eeebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: New York City\n[option] New York City\u00a0\u00a0 City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.096, 0.628, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6914", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.445, 0.098, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6915", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.54, 0.856, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6916", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.221, 0.5, 0.255] in the image\nAnd my action is Action: TYPE\nValue: Manhattan"}]}, {"id": "mind2web_6917", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.734, 0.823, 0.748] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6918", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Reggae -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.274, 0.881, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6919", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Paris\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.443, 0.093, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6920", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.341, 0.741, 0.365] in the image\nAnd my action is Action: TYPE\nValue: Metformin 1000mg"}]}, {"id": "mind2web_6921", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK\n[searchbox] Refine Location -> TYPE: Boston\n[span] MA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.751, 0.151, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6922", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.421, 0.391, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6923", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.396, 0.975, 0.427] in the image\nAnd my action is Action: TYPE\nValue: Tim Stebee"}]}, {"id": "mind2web_6924", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.614, 0.923, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6925", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.212, 0.741, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6926", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.042, 0.44, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6927", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6928", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.232, 0.906, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6929", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.213, 0.027, 1.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6930", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.153, 0.782, 0.182] in the image\nAnd my action is Action: TYPE\nValue: New york knicks"}]}, {"id": "mind2web_6931", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK\n[link] All -> CLICK\n[label] Japanese Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.216, 0.559, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6932", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.486, 0.943, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6933", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.689, 0.158, 0.7] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6934", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] European trains -> CLICK\n[menuitem] Spain train tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.59, 0.794, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6935", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.49, 0.449, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6936", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 1.87, 0.296, 1.913] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6937", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 10 -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.506, 0.908, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6938", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.141, 0.392, 0.155] in the image\nAnd my action is Action: TYPE\nValue: belo horizonte"}]}, {"id": "mind2web_6939", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.299, 0.598, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6940", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK\n[button] 6 - 9 Days -> CLICK\n[button] SEARCH CRUISES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.12, 0.212, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6941", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] Depart date please enter date in the format dd spa... 
-> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.543, 0.927, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6942", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK\n[button] Apply promo code -> CLICK\n[textbox] Apply promo code -> TYPE: 1000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.183, 0.953, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6943", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK\n[button] 6 - 9 Days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.307, 0.871, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6944", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.047, 0.219, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6945", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.379, 0.701, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6946", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.543, 0.285, 0.611] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6947", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... -> CLICK\n[span] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.234, 0.853, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6948", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.451, 0.86, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6949", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.481, 0.048, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6950", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.391, 0.163, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6951", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.418, 0.039] in the image\nAnd my action is Action: TYPE\nValue: motherboard"}]}, {"id": "mind2web_6952", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK\n[button] Jul 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.928, 0.527, 0.984, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6953", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... 
-> CLICK\n[textbox] Last name -> TYPE: sin\n[textbox] Trip Credit / Ticket number -> TYPE: 1234567"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.314, 0.874, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6954", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK\n[link] $259 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.368, 0.902, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6955", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.301, 0.211, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6956", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.688, 0.786, 0.722] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6957", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.416, 0.913, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6958", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.249, 0.927, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6959", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[div] Spain -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.218, 0.442, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6960", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.775, 0.916, 0.79] in the image\nAnd my action is Action: TYPE\nValue: Bloom"}]}, {"id": "mind2web_6961", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.445, 0.233, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6962", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[link] Restaurants -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.269, 0.186, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6963", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK\n[textbox] Zip code -> TYPE: 90026\n[button] Apply within filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 1.823, 0.027, 1.834] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6964", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.565, 0.269, 0.728, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6965", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.035, 0.722, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6966", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\n[link] NBA . -> CLICK\n[link] Teams -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.343, 0.498, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6967", "image": {"bytes": "", "path": "./images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the highest average points scored in the current season\nPrevious actions:\n[button] NBA -> HOVER\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.036, 1.147, 0.062, 1.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6968", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] -> CLICK\n[label] Most popular -> CLICK\n[span] See availability -> CLICK\n[button] Show more dates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.501, 0.801, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6969", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.414, 0.246, 0.444] in the image\nAnd my action is Action: TYPE\nValue: 00"}]}, {"id": "mind2web_6970", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 1.025, 0.777, 1.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6971", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.072, 0.237, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6972", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.566, 0.541, 0.841, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6973", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.21, 0.33, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6974", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.089, 0.181, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6975", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. -> CLICK\n[textbox] Enter a Location -> TYPE: brooklyn\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.043, 0.286, 0.254, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6976", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.107, 0.492, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6977", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.34, 0.438, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6978", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.078, 0.613, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6979", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.286, 0.103, 0.39, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6980", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK\n[select] 1900 -> SELECT: 1995\n[link] View Page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.804, 0.34, 0.808] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6981", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.463, 0.122, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6982", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.014, 0.804, 0.034] in the image\nAnd my action is Action: TYPE\nValue: dayquil"}]}, {"id": "mind2web_6983", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK\n[select] English FA Community Shield -> SELECT: UEFA Champions League\n[select] 2022-23 -> SELECT: 2022-23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.295, 0.717, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6984", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.012, 0.546, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6985", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.103, 0.463, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6986", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.102, 0.216, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6987", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.236, 0.249, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6988", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.174, 0.178, 0.225, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6989", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.418, 0.192, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6990", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK\n[img] SUV -> CLICK\n[div] $75 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.423, 0.631, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6991", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.292, 0.5, 0.321] in the image\nAnd my action is Action: TYPE\nValue: YAW639"}]}, {"id": "mind2web_6992", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK\n[tab] Price (low to high) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.323, 0.84, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6993", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.002, 0.348, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6994", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 1.924, 0.277, 1.939] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6995", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.967, 0.611, 0.976] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6996", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.941, 0.994, 0.993] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6997", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.296, 0.699, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6998", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.289, 0.447, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6999", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.076, 0.17, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7000", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[link] Grocery -> CLICK\n[link] Bread -> CLICK\n[label] In Stock Today -> CLICK\n[checkbox] Tortillas (4) -> CLICK\n[button] Increase Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.673, 0.99, 0.7] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7001", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.008, 0.384, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7002", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.214, 0.463, 0.243] in the image\nAnd my action is Action: SELECT\nValue: Vietnam"}]}, {"id": "mind2web_7003", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.229, 0.729, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7004", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_bbad698b-ef52-4fda-85eb-5df447581cf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.255, 0.359, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7005", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.317, 0.649, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7006", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.509, 0.184, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7007", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 1.832, 0.192, 1.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7008", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.455, 0.492, 0.476] in the image\nAnd my action is Action: TYPE\nValue: CDG"}]}, {"id": "mind2web_7009", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.271, 0.923, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7010", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.262, 0.326, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7011", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.613, 0.659, 0.624] in the image\nAnd my action is Action: TYPE\nValue: Adams"}]}, {"id": "mind2web_7012", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.138, 0.478, 0.172] in the image\nAnd my action is Action: TYPE\nValue: Chennai"}]}, {"id": "mind2web_7013", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.121, 0.219, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7014", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Sort: Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.491, 0.232, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7015", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.209, 0.32, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7016", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.029, 0.456, 0.065] in the image\nAnd my action is Action: TYPE\nValue: Pizza"}]}, {"id": "mind2web_7017", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.102, 0.98, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7018", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_25614f00-cfb4-4d96-9189-80974032e6bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.095, 0.797, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7019", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.863, 0.222, 0.875] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7020", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 0.055, 0.9, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7021", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.823, 0.2, 0.833] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7022", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.407, 0.409, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7023", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.54, 0.512, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7024", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK\n[button] Expand Upcoming -> CLICK\n[button] add to watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.975, 0.059, 0.984] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7025", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.298, 0.695, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7026", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK\n[button] BOOKMARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.469, 0.324, 0.531, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7027", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.237, 0.566, 0.268] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_7028", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.419, 0.331, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7029", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.415, 0.463, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7030", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Next -> CLICK\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.677, 0.309, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7031", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.132, 0.351, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7032", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.259, 0.272, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7033", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_13d2a2b2-7b05-4bd4-9a33-659aa7490405.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.301, 0.384, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_7034", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.986, 0.308, 0.989] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7035", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK\n[button] Atlanta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.013, 0.424, 0.029] in the image\nAnd my action is Action: TYPE\nValue: pizza"}]}, {"id": "mind2web_7036", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.169, 0.666, 0.19] in the image\nAnd my action is Action: TYPE\nValue: 02199"}]}, {"id": "mind2web_7037", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.241, 0.681, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7038", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.512, 0.64, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7039", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_60166de7-b684-4fd1-a01e-814cabfb53ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.26, 0.35, 0.294] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_7040", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK\n[label] Free Cancellation -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.245, 0.926, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7041", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.135, 0.927, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7042", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? -> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.228, 0.714, 0.257] in the image\nAnd my action is Action: TYPE\nValue: 105307"}]}, {"id": "mind2web_7043", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.457, 0.234, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7044", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 1.09, 0.284, 1.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7045", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.301, 0.93, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7046", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.008, 0.546, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7047", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.516, 0.332, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7048", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_ddc83651-4e21-47b3-8ccc-50fc0d5e4783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK\n[textbox] Order number * -> TYPE: X123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.341, 0.766, 0.372] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_7049", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: Ohare, Chicago\n[span] , United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.25, 0.524, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7050", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.288, 0.442, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7051", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.23, 0.287, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7052", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.696, 0.375, 0.735] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7053", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_725df4d5-bb6a-418d-a6d5-1dc6fd8cc328.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.256, 0.095, 0.47, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7054", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.22, 0.76, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7055", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7056", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.083, 0.902, 0.118] in the image\nAnd my action is Action: TYPE\nValue: Fiji"}]}, {"id": "mind2web_7057", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.494, 0.688, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7058", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.477, 0.233, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7059", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_7949a928-22fa-4f0c-824a-d6b3ea062f7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\n[button] Browse -> CLICK\n[link] All Boardgames -> CLICK\n[link] Ark Nova -> CLICK\n[span] Ark Nova (English edition, third printing) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.14, 0.97, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7060", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.136, 0.324, 0.158] in the image\nAnd my action is Action: TYPE\nValue: chicago"}]}, {"id": "mind2web_7061", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.326, 0.42, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7062", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_253077b6-b883-4263-808e-e8cd35f2b6b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email Address -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.354, 0.799, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7063", "image": {"bytes": "", "path": "./images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information on how to find lost AirPods.\nPrevious actions:\n[link] Support -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.315, 0.5, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7064", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? -> TYPE: Dubai\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.705, 0.236, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7065", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK\n[textbox] search -> TYPE: Dota 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.112, 0.949, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7066", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.224, 0.539, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7067", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.432, 0.618, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7068", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: professional boxing\n[option] Professional Boxing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.157, 0.251, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7069", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.983, 0.233, 1.007] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7070", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\n[button] change store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.205, 0.593, 0.229] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_7071", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.158, 0.291, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7072", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.257, 0.675, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7073", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_ca7b5174-50e8-410c-882e-d33568d72b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Protections & Coverages -> CLICK\n[heading] Emergency Sickness Plan (ESP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.396, 0.748, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7074", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.287, 0.783, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7075", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.326, 0.156, 0.34, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7076", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.381, 0.393, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7077", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.259, 0.666, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7078", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... 
-> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.657, 0.699, 0.684] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7079", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK\n[link] add filter: 6(220) -> CLICK\n[link] add filter: Waterproof(171) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 2.489, 0.204, 2.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7080", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.577, 0.099, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7081", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.158, 0.93, 0.176] in the image\nAnd my action is Action: SELECT\nValue: Low to High"}]}, {"id": "mind2web_7082", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_2a577542-e4e4-4c33-b14b-3f01975609da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.095, 0.666, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7083", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.306, 0.23, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7084", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.142, 0.668, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7085", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... 
-> TYPE: Chicago Bulls\n[div] Chicago Bulls -> CLICK\n[heading] SCHEDULE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.122, 0.715, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7086", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK\n[button] Contact -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.345, 0.48, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7087", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.276, 0.968, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7088", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.519, 0.12, 0.557] in the image\nAnd my action is Action: SELECT\nValue: 2006"}]}, {"id": "mind2web_7089", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.061, 0.628, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7090", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK\n[textbox] e.g.: New York -> TYPE: Membership tier\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.542, 0.295, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7091", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK\n[button] Discount -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.16, 0.834, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7092", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.031, 0.378, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7093", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.525, 0.263, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7094", "image": {"bytes": "", "path": "./images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the birth place of Ali Wong.\nPrevious actions:\n[textbox] Search TV Shows and Movies... 
-> TYPE: Ali Wong"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.123, 0.595, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7095", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: MILAN\n[span] Milano (Milan) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.203, 0.194, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7096", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_e7cce11b-e3ef-43c9-bc41-3f6db08f9d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.054, 0.753, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7097", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.012, 0.211, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7098", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.031, 0.551, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7099", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.604, 0.922, 0.632] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7100", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.019, 0.45, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7101", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK\n[button] Save to favorites, KUDDARNA, Chair pad, outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.483, 0.384, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7102", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK\n[button] Search -> CLICK\n[button] Economy cars 5\u00a0Seats 1 Large bag 1 Small bag From ... 
-> CLICK\n[p] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.432, 0.917, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7103", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.266, 0.675, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7104", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 1.18, 0.153, 1.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7105", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.052, 0.556, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7106", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.484, 0.634, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7107", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.034, 0.223, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7108", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.049, 0.082, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7109", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.01, 0.418, 0.03] in the image\nAnd my action is Action: TYPE\nValue: wireless printer"}]}, {"id": "mind2web_7110", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... 
-> TYPE: New York\n[option] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 1.128, 0.264, 1.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7111", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.244, 0.196, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7112", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK\n[gridcell] Color -> CLICK\n[label] YELLOW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.364, 0.516, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7113", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK\n[link] Insurance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.201, 0.602, 0.226] in the image\nAnd my action is Action: TYPE\nValue: Health Insurance Top Up plans beneficial or not?"}]}, {"id": "mind2web_7114", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.099, 0.487, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7115", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.156, 0.682, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7116", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.079, 0.127, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7117", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.165, 0.339, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7118", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.346, 0.481, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7119", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.208, 0.571, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7120", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK\n[img] 8GB (1x8GB) DDR3L 1600 (PC3L-12800) Desktop Memory... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.333, 0.963, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7121", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.026, 0.292, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7122", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.77, 0.241, 0.796] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7123", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds\n[div] Alien Worlds -> CLICK\n[link] Seasons & Episodes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 1.183, 0.129, 1.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7124", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.142, 0.925, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7125", "image": {"bytes": "", "path": "./images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is trending now on AMC on-demand?\nPrevious actions:\n[link] Visit the On Demand page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.525, 0.087, 0.729, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7126", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.783, 0.222, 0.794] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7127", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.191, 0.573, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7128", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.385, 0.159, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7129", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.569, 0.121, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7130", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c27e0edb-166a-4fca-afd7-de2a8823e3dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK\n[button] Show 10 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.396, 0.397, 0.409] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_7131", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.363, 0.131, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7132", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop\n[div] -> CLICK\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.051, 0.712, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7133", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.023, 0.335, 0.036] in the image\nAnd my action is Action: TYPE\nValue: electrician"}]}, {"id": "mind2web_7134", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.058, 0.178, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7135", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas\n[button] Search -> CLICK\n[button] Open Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.195, 0.621, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7136", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.193, 0.495, 0.243] in the image\nAnd my action is Action: TYPE\nValue: San Francisco"}]}, {"id": "mind2web_7137", "image": {"bytes": "", "path": "./images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a theatre near 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.027, 0.443, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7138", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.318, 0.748, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7139", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[div] Shanghai, China -> CLICK\n[textbox] Where to? -> TYPE: SEOUL\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.405, 0.799, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7140", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK\n[button] Member Rate Prepay Non-refundable -> CLICK\n[label] I have read the rate details and accept the cancel... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.711, 0.647, 0.741] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7141", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.302, 0.484, 0.341] in the image\nAnd my action is Action: TYPE\nValue: Allan"}]}, {"id": "mind2web_7142", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK\n[textbox] Upper Bound -> TYPE: 40\n[textbox] Lower Bound -> TYPE: 0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.946, 0.073, 0.956] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7143", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[b] Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.309, 0.777, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7144", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_88c22cbc-3eaa-4221-8790-92ab48659205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES\n[input] -> TYPE: smith\n[input] -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.416, 0.585, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7145", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.375, 0.397, 0.388] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_7146", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.268, 0.568, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7147", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.33, 0.234, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7148", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.34, 0.193, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7149", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6\n[combobox] Select Year -> SELECT: 2020"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.172, 0.624, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7150", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.151, 0.43, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7151", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.531, 0.233, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7152", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. -> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.483, 0.48, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7153", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.137, 0.159, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7154", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.224, 0.73, 0.254] in the image\nAnd my action is Action: TYPE\nValue: Atlanta International Airport"}]}, {"id": "mind2web_7155", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Most Played -> CLICK\n[generic] By Current Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.047, 0.543, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7156", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.178, 0.463, 0.184] in the image\nAnd my action is Action: TYPE\nValue: skiing"}]}, {"id": "mind2web_7157", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.052, 0.129, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7158", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.287, 0.481, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7159", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.006, 0.675, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7160", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.181, 0.43, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7161", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.039, 0.838, 0.06] in the image\nAnd my action is Action: TYPE\nValue: La Bomba"}]}, {"id": "mind2web_7162", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.062, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7163", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_6ef520ac-7d19-4d52-852c-5a128b13cc40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.054, 0.532, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7164", "image": {"bytes": "", "path": "./images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show popular news which is at number one in comics.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.11, 0.047, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7165", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3\n[textbox] Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.323, 0.324, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7166", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.012, 0.62, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7167", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.287, 0.746, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7168", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.355, 0.639, 0.38] in the image\nAnd my action is Action: TYPE\nValue: Cincinnati"}]}, {"id": "mind2web_7169", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.176, 0.217, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7170", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_88e0ed6a-8359-41a6-99d4-e55bd567816c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.243, 0.462, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7171", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.322, 0.045, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7172", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.046, 0.546, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7173", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.545, 0.285, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7174", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.467, 0.284, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7175", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.146, 0.643, 0.188] in the image\nAnd my action is Action: SELECT\nValue: 2 00 PM"}]}, {"id": "mind2web_7176", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.137, 0.753, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7177", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_75b39e1c-d143-4ade-8ba1-6ebad9aacff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. 
-> CLICK\n[button] Saturday, April 29, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.059, 0.807, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7178", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.277, 0.452, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7179", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.249, 0.573, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7180", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK\n[div] -> CLICK\n[button] Form -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.766, 0.143, 0.784] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7181", "image": {"bytes": "", "path": "./images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current roster of the Miami Heat.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.101, 0.335, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7182", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.622, 0.363, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7183", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.739, 0.089, 0.747] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7184", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.243, 0.24, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7185", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.032, 0.232, 0.051] in the image\nAnd my action is Action: TYPE\nValue: thai restaurants"}]}, {"id": "mind2web_7186", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.146, 0.338, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7187", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.293, 0.672, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7188", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] SOCCER -> CLICK\n[a] FEATURED MATCHES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.157, 0.41, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7189", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests\n[button] March 30, 2023. Selected date. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.25, 0.466, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7190", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a9216e09-9cdf-4e76-961a-972569f94327.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.345, 0.663, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7191", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: xbox 360 games"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.036, 0.645, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7192", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... 
-> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.233, 0.359, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7193", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.004, 0.791, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7194", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b847be3f-0e83-44b6-900e-cd2c4d162f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK\n[link] Plan my Trip - Press enter key to submit the form ... -> CLICK\n[tab] Fastest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.324, 0.332, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7195", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK\n[div] Sail From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.173, 0.82, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7196", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Back -> CLICK\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.659, 0.309, 0.67] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7197", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[label] Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.182, 0.609, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7198", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK\n[select] DURATION -> SELECT: 2 - 5 Days"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 1.496, 0.969, 1.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7199", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Activity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.667, 0.382, 0.676] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7200", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.203, 0.17, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7201", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.032, 0.938, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7202", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.309, 0.264, 0.326] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_7203", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.258, 0.925, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7204", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.271, 0.794, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7205", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.66, 0.583, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7206", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.178, 0.492, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7207", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK\n[button] filter by services -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.23, 0.62, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7208", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.138, 0.381, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7209", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.044, 0.374, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7210", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.107, 0.964, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7211", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_766c0830-7e36-42fb-8f3e-9473f6322736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.49, 0.685, 0.518] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_7212", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas\n[span] Las Vegas, Nevada, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.141, 0.964, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7213", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK\n[span] Luggage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.329, 0.316, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7214", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK\n[checkbox] RV Site -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.121, 0.434, 0.139] in the image\nAnd my action is Action: TYPE\nValue: California"}]}, {"id": "mind2web_7215", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.12, 0.43, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7216", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_02557082-babe-4a38-a66a-4b2f4a170b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK\n[link] MULTI-RIDES & RAIL PASSES USA Rail passes, monthly... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.064, 0.494, 0.299, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7217", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.254, 0.568, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7218", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\n[button] Experience -> CLICK\n[link] Baggage allowance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.456, 0.641, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7219", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.249, 0.284, 0.288] in the image\nAnd my action is Action: TYPE\nValue: O'hare Airport"}]}, {"id": "mind2web_7220", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.338, 0.391, 0.36] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_7221", "image": {"bytes": "", "path": "./images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are today's Limited Time Offers deals?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.269, 0.082, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7222", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.255, 0.271, 0.28] in the image\nAnd my action is Action: TYPE\nValue: 12345678"}]}, {"id": "mind2web_7223", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Kevin Durant\n[link] Kevin Durant Phoenix Suns -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.218, 0.164, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7224", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[link] Customer Service -> CLICK\n[div] Need an extra part? 
-> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.414, 0.617, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7225", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.117, 0.568, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7226", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.132, 0.769, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7227", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\n[link] Auto Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.485, 0.199, 0.587, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7228", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.048, 0.153, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7229", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.06, 0.897, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7230", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.039, 0.271, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7231", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.122, 0.819, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7232", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.288, 0.476, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7233", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.041, 0.668, 0.074] in the image\nAnd my action is Action: TYPE\nValue: Kayaks"}]}, {"id": "mind2web_7234", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6ecf4fe7-562e-42cd-8cb4-871246f9d45a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.394, 0.351, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7235", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.034, 0.673, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7236", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.307, 0.077, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7237", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... 
-> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.367, 0.352, 0.388] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_7238", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_23a34dd1-6a2d-4b3b-b0ae-bd4472286e89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.369, 0.354, 0.4] in the image\nAnd my action is Action: SELECT\nValue: Ticket Number"}]}, {"id": "mind2web_7239", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.373, 0.595, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7240", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.073, 0.855, 0.083] in the image\nAnd my action is Action: SELECT\nValue: Next month"}]}, {"id": "mind2web_7241", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.9, 0.333, 0.909] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7242", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.543, 0.018, 0.584, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7243", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.039, 0.333, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7244", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_4885ec31-8249-4992-8ce2-c661f339be98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.193, 0.977, 0.232] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_7245", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.244, 0.325, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7246", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.192, 0.282, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7247", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\n[link] Music -> CLICK\n[link] jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.767, 0.645, 0.775] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7248", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.605, 0.83, 0.619] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7249", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.335, 0.414, 0.367] in the image\nAnd my action is Action: TYPE\nValue: Las Vegas"}]}, {"id": "mind2web_7250", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.113, 0.83, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7251", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.16, 0.274, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7252", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK\n[link] add filter: 6(220) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.985, 0.142, 0.996] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7253", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.475, 0.887, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7254", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.428, 0.087, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7255", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.721, 0.786, 0.756] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7256", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.017, 0.509, 0.046] in the image\nAnd my action is Action: CLICK"}]}] \ No newline at end of file +[{"id": "mind2web_0", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_c7d6e34d-c623-4e8a-93f4-c19e1269d82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.492, 0.504, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_01b5c5b4-5304-4c38-9e8c-36cf97ccfa74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.312, 0.694, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_77eb7f8e-0e43-4535-b87b-9704908c779f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.693, 0.783, 0.755] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_84fd0d10-e222-4c1b-a852-e49ac16d3462.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.445, 0.843, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_e3e58722-6d08-46af-bb39-109c07dc6874.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.255, 0.346, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_670d6d1f-a265-4b87-bb85-bcbe74cf3740.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.339, 0.424, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_a16a9f27-7699-4f5f-a78f-5b0d6429569c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.068, 0.489, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_b17d8b15-9af5-4661-9d7b-74851a227b83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.619, 0.231, 0.631] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_8", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_f7ca543f-c1ac-4a4f-9bf1-f9980a41a07a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.045, 0.249, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_9", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_515c6c84-9b18-49a5-a48c-2bdb562a48c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.134, 0.74, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_10", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_1b4aa789-a458-4655-9eb9-f9e72cb900fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK\n[button] Age -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.272, 0.981, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_11", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_83cd5543-cd4f-4bfe-9d7e-4ce6bf0dce32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.396, 0.614, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_12", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_97174dd3-20af-42da-b81d-bed5eadb1b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.162, 0.855, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_13", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_dab1ffd9-2df1-450b-8d68-120b605f8d45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.108, 0.719, 0.149] in the image\nAnd my action is Action: TYPE\nValue: NAPA VALLEY"}]}, {"id": "mind2web_14", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7c992448-8fd4-4aff-96a6-34b790363b71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Movies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.118, 0.535, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_15", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_024c93e9-5579-44a1-bf6a-c773f34b8d34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.583, 0.259, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_16", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_2e80fca7-6d2c-46fa-a7f3-a9569f0dc82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.224, 0.553, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_17", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d3e3e41f-c601-4798-b602-6990777eba4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.205, 0.196, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_18", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_7fd3adeb-d440-4a31-a75e-3d2c009e00b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.306, 0.508, 0.34] in the image\nAnd my action is Action: SELECT\nValue: Civic"}]}, {"id": "mind2web_19", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_7425e091-4439-49a8-a6ff-c355ec0c4f34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK\n[img] Men's UA Tech\u2122 2.0 Short Sleeve -> CLICK\n[button] XL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.19, 0.952, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_20", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_2255c0f5-0829-4874-89d2-1dafcd92da6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.256, 0.159, 0.278] in the image\nAnd my action is Action: SELECT\nValue: 1 Ticket"}]}, {"id": "mind2web_21", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_f2fd4cab-af21-4062-bc3e-9899832b6611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.825, 0.081, 0.832] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_22", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_e9d3601e-1394-445d-b999-b957959694a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.217, 0.259, 0.246] in the image\nAnd my action is Action: TYPE\nValue: Santa Fe"}]}, {"id": "mind2web_23", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_04f0418a-4171-42ec-9588-fbf470f54df0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.102, 0.702, 0.126] in the image\nAnd my action is Action: TYPE\nValue: male"}]}, {"id": "mind2web_24", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_76b93c24-ab52-4865-8491-2c3423d615af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.07, 0.519, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_25", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_3cb50019-9056-4144-9449-be80b231f3cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.114, 0.192, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_26", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_b7a36124-3c4f-4a40-b927-9e0c1f548427.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.181, 0.289, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_27", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6c1f5f61-3aa6-4eba-bd8b-ef20145ac9b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.342, 0.327, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_28", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_19e36673-154e-407f-9425-7d8c2dfdd30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.181, 0.931, 0.227] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_29", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_cad3fd8c-22ca-4bef-806e-3ffa533fa0b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.286, 0.464, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_30", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_39e4017e-d59e-4582-be70-07a8b8cfd2fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.848, 0.141, 0.908, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_31", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9e6abcf6-1bbe-42c9-bba3-8fdfe5b228a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.252, 0.894, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_32", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_3f8b1ce5-738a-4d8e-8fe9-e682ff2cb865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.045, 0.468, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_33", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_589275b2-8649-4dcd-b815-bca201d28836.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK\n[heading] Features -> CLICK\n[label] BBB Rated A+/A -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.248, 0.559, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_34", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_c027c8ec-b3e0-44d0-b671-5700374e6284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.211, 0.26, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_35", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_1113a688-3969-4e5b-9a16-d418ef8ac466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.178, 0.619, 0.211] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_36", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_fd1a35c4-45cd-4e46-ba38-ceb3203b6cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK\n[heading] Electric Mile Range -> CLICK\n[checkbox] 300+ Miles (12) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.159, 0.888, 0.182] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_37", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_3f1ad2d8-508b-4dca-b072-8d2ff125fafe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.449, 0.359, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_38", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9abb53ba-45cd-4c7f-92ee-33073e99789b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.102, 0.621, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_39", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_b32b39aa-4510-4ee1-8d3f-560a4fb3220f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.207, 0.505, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_40", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_600552f9-d248-4c02-bede-2b4624a229da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.501, 0.109, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_41", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_ad2fa916-8d5f-41f8-bd96-ea4924d38c52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.342, 0.254, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_42", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_5f3ff4a9-f9ef-4e5b-99de-5bbf85c5f02e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.076, 0.523, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_43", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_3f6cc639-ac20-4823-b0b9-b6bb1a1c9d26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.269, 0.981, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_44", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_9e62b475-81ab-4342-974b-bc13968dad2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK\n[button] Arizona -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.249, 0.35, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_45", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4dd5c447-4344-47c2-aaf4-63554db508f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.092, 0.85, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_46", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_cff52937-7d1f-4306-8a48-62e7d8b814fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.077, 0.294, 0.089] in the image\nAnd my action is Action: TYPE\nValue: 59901"}]}, {"id": "mind2web_47", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_33ee7882-a48b-49b5-afd7-d34ebec0a600.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.071, 0.321, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_48", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e17eb0b6-cf6e-45af-be58-704816835353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.207, 0.271, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_49", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_2fa1df82-2bfc-4e2a-a8dc-bf00f7ea75a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.279, 0.291, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_50", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_d4ad5462-08d1-400f-881b-080390e948c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.025, 0.728, 0.046] in the image\nAnd my action is Action: TYPE\nValue: lebron james"}]}, {"id": "mind2web_51", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_abcaa74b-7460-4b0b-95e6-3fcf23ac1904.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.333, 0.298, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_52", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_332d0daa-f81f-45b6-aa45-2bb32665819c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.454, 0.645, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_53", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0d5293b2-9ff7-48b5-80f6-b043d52c9066.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.077, 0.386, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_54", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_770aa60a-7be7-436b-9b5d-59111c135246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[div] & up -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.369, 0.451, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_55", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_0b2e5b9a-bed8-4064-8057-bb32b4bc6111.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.125, 0.927, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_56", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_459ed167-c817-4bbf-bc91-73822e98bfd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.14, 0.326, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_57", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_b75be239-fb3b-4d79-820c-e374efbe2c73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! -> CLICK\n[div] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 0.123, 0.689, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_58", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_251534f2-9acc-4f2c-a1d4-2158f8a4840e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.136, 0.468, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_59", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_b9ea57b4-ce6f-4010-b79b-f3f8fc031d1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.23, 0.459, 0.253] in the image\nAnd my action is Action: TYPE\nValue: 5000"}]}, {"id": "mind2web_60", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_3a37f05f-1cf6-43cb-9509-7936404dae33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.0, 0.505, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_61", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a87e7411-3bda-4944-beb8-2f77f9fbe4b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.288, 0.243, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_62", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bbff6535-c2fa-4fe3-ab52-3ba6813014b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.424, 0.305, 0.457] in the image\nAnd my action is Action: SELECT\nValue: PM"}]}, {"id": "mind2web_63", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_3fed27c4-2cb3-43d0-b92a-16275e1f8178.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.02, 0.93, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_64", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_894a3e88-d3f2-417d-b464-ce6f3086c9cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.324, 0.236, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_65", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_be4c997b-89fc-4e4f-93d1-092dc7cde1a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK\n[div] White Water Rafting -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.195, 0.905, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_66", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_409090be-7df9-412d-b354-2a68656eb421.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Library -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.301, 0.285, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_67", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_d905eacd-ba9e-43ac-815c-d4e42a636301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.117, 0.573, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_68", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_f7f63924-6669-400e-b187-76d3b6243151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.447, 0.36, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_69", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_a824cebd-8374-4f3a-b76d-df0f6a9f45ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.335, 0.759, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_70", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_f73c9f1b-6d77-4f34-bbdc-84ab137a91f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.296, 0.831, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_71", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_ff0ee393-4490-4aaf-9fc0-a21fcdb41c9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.027, 0.187, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_72", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_47baddf3-e09d-414f-8c3a-7de89a39aa06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.206, 0.324, 0.225] in the image\nAnd my action is Action: SELECT\nValue: BMW"}]}, {"id": "mind2web_73", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_7b44e78e-d6d9-44ad-8331-3930b3d959b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK\n[searchbox] Please type your destination -> TYPE: udupi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.173, 0.409, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_74", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_6ba16278-a7a3-4e06-8426-356b05e3219c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.258, 0.678, 0.326] in the image\nAnd my action is Action: TYPE\nValue: smith@gmail.com"}]}, {"id": "mind2web_75", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_98ce6733-1dd3-4cf0-a29c-03f67319dc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.251, 0.314, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_76", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_276ea5ec-d119-4d03-9121-f9ee4616da2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.607, 0.53, 0.623] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_77", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_9a924659-4b0a-4374-9e9c-e24889c4dac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.071, 0.719, 0.098] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_78", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_3c3caece-89d2-4110-b976-242c6070e947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.329, 0.736, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_79", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_794b3de4-0e28-4ba9-819a-017558734d98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK\n[img] Submit Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.363, 0.205, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_80", "image": {"bytes": "", "path": "./images/73cf6eec-cae6-4d5b-9b8e-e44359311565_702d51c4-4747-4525-b58c-324c776f600a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for the newsletter\nPrevious actions:\n[textbox] Email Address * -> TYPE: larryknox@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 0.685, 0.337, 0.718] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_81", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c3c9c397-7337-484c-97c1-71421f964f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... -> CLICK\n[textbox] Email -> TYPE: lin.lon@gmail.com\n[textbox] Confirm email -> TYPE: lin.lon@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.387, 0.83, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_82", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_4c93db0b-982d-4815-933d-10283c2fb380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.203, 0.831, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_83", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_eba8a877-10db-482e-8ca8-f6b61efb119b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.529, 0.829, 0.545] in the image\nAnd my action is Action: TYPE\nValue: HOLLYWOOD"}]}, {"id": "mind2web_84", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_50d4a729-d12a-4707-af8d-69b5ab13c8db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... 
-> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.491, 0.219, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_85", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_0043be09-27a3-4b47-81c8-cc4ee1cb996e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK\n[button] Jul 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.357, 0.984, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_86", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_9a4dabad-f38c-4fea-9345-202450e96322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\n[button] SPORTS -> HOVER\n[tab] NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.149, 0.455, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_87", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_66ece2a6-3789-462d-8cd2-627355cd988a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.357, 0.227, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_88", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_cf8a0b6f-dde7-4ec5-959a-23c1fd331528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK\n[button] All dates -> CLICK\n[link] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.239, 0.941, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_89", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_98b66f05-3799-48a7-955d-5c2075f75a44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[span] Save -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.106, 0.612, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_90", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_1c9cabc3-3fc6-4561-891d-5b473220ab9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.43, 0.759, 0.457] in the image\nAnd my action is Action: TYPE\nValue: 08/04/23"}]}, {"id": "mind2web_91", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_9d73a27e-5499-4d8d-84c2-f442fdfd516e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK\n[link] Guitar Pro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.204, 0.97, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_92", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_25d69ce3-0527-4b54-ae05-76b4246c6816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.046, 0.358, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_93", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c4186aed-2d4d-41ca-bc89-ec1e003fc4b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.449, 0.606, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_94", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_9eb3684f-bdc3-44d7-aa67-be7839fb83ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.013, 0.05, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_95", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_16a62e54-3ff2-4ec8-aa34-0f75f384d352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.096, 0.199, 0.182, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_96", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_cd58814d-9500-4922-a7f4-416a19ffcc34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.16, 0.168, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_97", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_06b3b17f-68a0-4d62-a236-9852c9ae658a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK\n[div] Popular -> CLICK\n[div] A - Z -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.173, 0.263, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_98", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_3533eae9-5554-4489-9498-64ba4f8c832b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.397, 0.657, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_99", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_0a0d2fc8-04cf-49c2-a658-cf12ef65c5eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian\n[b] Vegetarian -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.22, 0.075, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_100", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_5330614e-ce1a-4da4-906a-6fc408c6c3f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.051, 0.553, 0.091] in the image\nAnd my action is Action: TYPE\nValue: mens black hoodie"}]}, {"id": "mind2web_101", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_c7bcbc4d-fcc6-40ae-8c77-900307f08664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow\n[span] Glasgow Central -> CLICK\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK\n[link] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.198, 0.133, 0.215] in the image\nAnd my action is Action: SELECT\nValue: 15"}]}, {"id": "mind2web_102", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_503a5c81-6a8a-4ece-9c8a-c80d7198f388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.295, 0.079, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_103", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_4e296369-5864-4c0d-b372-f4deb8454461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.321, 0.471, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_104", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_d0666e22-cb4c-4bab-b17b-7dabac0d02b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.01, 0.898, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_105", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_425a5e71-db07-473d-9e9a-43da9606841a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.309, 0.988, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_106", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_0096821b-49aa-4a8d-b059-a9cea6e724f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK\n[span] Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.149, 0.495, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_107", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_0b0bd6c0-a4c4-4a82-8757-549fe7ac92a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.178, 0.406, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_108", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_48bbbdea-1b37-4a5f-bd55-a9cb309e5507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.608, 0.2, 0.63] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_109", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_2dd318f7-faa9-4bec-891d-2d35b588268d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.258, 0.312, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_110", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_ac062748-57fb-429f-a6a1-c6eeee2dee00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[link] Store -> CLICK\n[link] Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.178, 0.219, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_111", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_0ceb3fb8-d1c7-4b00-a0f5-15d88fee4234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.088, 0.947, 0.115] in the image\nAnd my action is Action: SELECT\nValue: Carnival"}]}, {"id": "mind2web_112", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8acc4e87-a3c3-4f37-a65d-ced00e37a017.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.117, 0.388, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_113", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_81d06b54-9858-498c-adf3-e40fd5b4ae17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.472, 0.772, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_114", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_10cb2e61-9f95-4cac-8db9-03791ea89776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK\n[link] F1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.24, 0.195, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_115", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_3d80d70b-e911-4b66-832a-9e4e48884689.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[img] Travel Reinvented. 
-> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.506, 0.873, 0.522] in the image\nAnd my action is Action: TYPE\nValue: Happy Christmas"}]}, {"id": "mind2web_116", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3ffa4643-b065-489e-824a-9c30771b411c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.465, 0.286, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_117", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ac89e5b0-f806-42bb-81f7-e0e072172796.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.036, 0.938, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_118", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_ded882d1-0f01-46b0-b67a-11ad3e9b513d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.2, 0.729, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_119", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_50cf3b67-127e-4d22-b584-4708cb56b602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.185, 0.226, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_120", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_d55123cf-0a80-4b19-9b22-6719bbab3231.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.195, 0.761, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_121", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_cb0df657-8141-4273-979b-5d66494faae9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.248, 0.249, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_122", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_2bb2c81e-02b9-46b3-a3a0-89d174bd1e53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.8, 0.437, 0.813] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_123", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_2c3f007d-c4df-4247-8a58-bf4b58db1530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.018, 0.443, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_124", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_b03e5747-77f1-4f8a-9590-ca38c6b62ed3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK\n[link] Pets welcome (118) -> CLICK\n[link] Free breakfast (57) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.145, 0.556, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_125", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_c1c57834-374a-41c8-ac41-13163aad1f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.094, 0.861, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_126", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_992413a4-5f43-4f99-866a-cf43fa75678c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.286, 0.5, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_127", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_45868167-8aa0-44cb-972a-cf3113815043.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.0, 0.756, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_128", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_dac23220-8015-4eca-83ef-a520c024eb6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.308, 0.95, 0.347] in the image\nAnd my action is Action: SELECT\nValue: 2023"}]}, {"id": "mind2web_129", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_7fffde10-7954-4767-b39f-f913cadf8a51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.148, 0.517, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_130", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_ccf0ae9a-6d2e-48e5-be97-d91c9528fc61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK\n[span] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.169, 0.966, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_131", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_ea3c2af5-fb9c-4145-ab53-a35548df1e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.19, 0.157, 0.207] in the image\nAnd my action is Action: TYPE\nValue: 59316"}]}, {"id": "mind2web_132", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_3d12ecd5-f8ec-4e3a-b0c0-7d16c6e27f06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.767, 0.846, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_133", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_647547bf-d59a-4a3b-b32a-9ea1fb94a530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.609, 0.276, 0.618] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_134", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_d02d7a6c-c5d6-4b61-bd22-20a8e2309ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK\n[checkbox] Wine tasting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.91, 0.713, 0.954] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_135", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_82c86cfb-3786-4a17-95c7-5cb6562ae363.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[button] All -> CLICK\n[radio] Key extraction -> CLICK\n[radio] Vehicle -> CLICK\n[generic] 2 filters Key extraction \u2022 Vehicle Clear all Cance... -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.179, 0.642, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_136", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_d25c7cda-0f2b-477a-8971-de77649a5939.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.059, 0.414, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_137", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d5672242-c470-499b-bc08-b42bbd8fb450.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.074, 0.163, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_138", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_388f7180-285e-4867-8f40-f223749016f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.337, 0.861, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_139", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_8f7c6002-5777-46f1-80f9-13e66c053b06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.148, 0.282, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_140", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_458554fd-debe-4dbe-a011-b64a5301fbd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.165, 0.237, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_141", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_b22bf5a8-0dc7-4cb4-adb4-1ae86643fe10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK\n[link] 1990s 3,183 -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.45, 0.97, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_142", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_afd0591d-15ef-4dec-ac72-b2cea47ba8dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. 
-> TYPE: colombo\n[strong] Colombo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.218, 0.5, 0.263] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_143", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_a03e1463-bb7f-481c-9579-9caa826a8644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees\n[option] New York Yankees -> CLICK\n[link] TICKETS -> CLICK\n[div] More Options -> CLICK\n[span] Parking Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.467, 0.978, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_144", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_084b94a2-6e3c-4b64-baa1-ba2dc61777a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.285, 0.079, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_145", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_f9bbc023-4e46-4803-a374-743e972eb8df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.265, 0.155, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_146", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e6345ea9-a5a4-4b88-95b5-4efececed261.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.152, 0.319, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_147", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_44cd97dd-6f7f-4709-b641-f662ff17208b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.371, 0.266, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_148", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c47062d6-0d58-4383-9d62-efc14a92807c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.0, 0.354, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_149", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_524b1cc6-240f-41df-a42b-9de89456c807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Protections & Coverages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.49, 0.341, 0.682] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_150", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_438fdc63-2ab2-4f1f-9731-321dc68fda6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.235, 0.341, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_151", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_7b98db39-1751-42ca-b632-f40400c443bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.342, 0.122, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_152", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_c3f1eed3-fedd-4937-8837-44bcace14f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... 
-> CLICK\n[link] MONTHLY -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.298, 0.328, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_153", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_0802ef06-4167-4e70-b52f-4f106bf1ce19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK\n[generic] 600 W -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.276, 0.4, 0.295] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_154", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3163cce7-c289-4004-94b8-15e312dac0dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.522, 0.29, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_155", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_6838510b-e62e-416f-b389-46cd59c40012.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] Caribbean -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[button] Increase to 4 guest button -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.065, 0.233, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_156", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_e851afe1-7aac-43ac-ab6d-e36cb60ccbd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Chicago Bulls"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.116, 0.406, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_157", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_aeeb7ac6-c8f3-4c56-bdb6-e9269dafab16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.013, 0.855, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_158", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c8b6e56e-4973-41fa-8ffb-0b3e044b052a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.388, 0.595, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_159", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_83752122-ca77-4eda-ba7c-c98b8fcfe3af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.664, 0.469, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_160", "image": {"bytes": "", "path": "./images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_b30b2ef7-14ff-4170-b14a-4894124b0efc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of publishers for board games\nPrevious actions:\n[button] Browse -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.085, 0.218, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_161", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_481e6509-59bc-48c7-b6c0-1f065058835d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK\n[button] No thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 0.802, 0.953, 0.833] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_162", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_10d26c2c-7db7-44d7-b5cb-ae1e2a15f5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... 
-> CLICK\n[button] 2023-03-31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.202, 0.5, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_163", "image": {"bytes": "", "path": "./images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_97a45713-c48e-4eef-8fe4-5711e87f4c5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me news about the ps5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.337, 0.044, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_164", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_7426506d-f253-4977-9475-faa2e4975689.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[li] Spring -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.011, 0.82, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_165", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_b86bc343-260f-4335-980b-ea5e2fca2a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.228, 0.41, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_166", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_8f468438-c6e4-4af0-be8e-055e175d6de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.7, 0.541, 0.737] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_167", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_be1bab14-11b9-41e1-b4b2-b0a1f0a834c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? 
-> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.351, 0.763, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_168", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_c01ad00e-d680-48e8-bfe6-bf73b8d30674.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK\n[link] Navigate to 1-Night Kiosk Rentals See More -> CLICK\n[img] Blockers, MOVIE on , , Comedy, Romance, Special In... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.363, 0.338, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_169", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f16bc000-f7b9-4f57-b5e0-4fcf43b9bb40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.121, 0.237, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_170", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_773b414e-e1c4-4471-bbdf-f8143c8a606f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.225, 0.228, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_171", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ab620f2c-ea4d-4465-b77b-aa3b064e0f47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.243, 0.703, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_172", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_98480951-572a-451f-8538-188191a9a0c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.353, 0.116, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_173", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_2f4873c2-1964-4640-8275-11655aa7465f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK\n[i] -> CLICK\n[checkbox] Buick\ufeff1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.237, 0.588, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_174", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a8cfda8a-1fc7-4f7c-bec5-09e4f3b1c420.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.537, 0.059, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_175", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_218bb404-5a73-4d8f-a72d-1b680a898e89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.099, 0.319, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_176", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_38c21a93-8c5b-4d31-b72c-06acc63a2afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.468, 0.194, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_177", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_e9a53ae9-3b3d-444a-9dcc-a92bec2b77de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.426, 0.654, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_178", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_e1cdcfc1-fc66-4d3b-8858-876e11893c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Volunteer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.269, 0.398, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_179", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a03f2e58-8fe0-4622-998b-ddcd17a238ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: MUMBAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.327, 0.359, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_180", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_93807878-de8b-4b57-83bd-6964dd4decf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.632, 0.32, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_181", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e8fc2f56-54e4-48fc-8ec2-dd86b6042ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.158, 0.705, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_182", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_25b74fe3-6b52-453a-9885-aaa17ab27940.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.016, 0.953, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_183", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_20f844f5-7336-4362-91e4-577a81d9d46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.222, 0.514, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_184", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31217e2a-0ae5-4c3a-9559-dbf6eba97bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Gaming Monitors Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.527, 0.868, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_185", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_dc71652f-34de-4786-b270-3b5b750c5905.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.319, 0.309, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_186", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9d136317-f8c9-430b-a4d1-ecb67729f4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.166, 0.429, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_187", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f46868bc-232a-4680-8b33-8e5198c0010c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.035, 0.272, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_188", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_94563925-37bd-495c-9e75-5a2cfda4e37e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.264, 0.194, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_189", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_d46fc387-0c97-4047-b400-07d10dd1c8d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.073, 0.109, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_190", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_d4a3cabd-8df7-4d25-a8a7-2ed784bafd3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.123, 0.332, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_191", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_808a3d44-fd0e-4a1e-aef7-55fead922731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK\n[link] Add to wishlist -> CLICK\n[textbox] Wishlist name -> TYPE: Must buy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.159, 0.716, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_192", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_88495664-04bf-43d1-93fd-d2afd216d7f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK\n[link] View store directory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.753, 0.294, 0.785, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_193", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_5b1347ea-791a-4e8d-bc7b-db15fe3375e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.361, 0.589, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_194", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_7c4bf048-7214-4ba9-aa74-822f50390427.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.322, 0.237, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_195", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_bd2e6fd5-bbac-40dc-8a8b-1f2ed8eb5c07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.194, 0.532, 0.21] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_196", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1a347d26-3f20-44c8-8030-c09a8ae8ec9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream\n[p] Neve Campbell, Courteney Cox, David Arquette -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.195, 0.695, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_197", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_338aee04-6bae-4c3a-b3c3-1a8a12a61210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\n[combobox] Search query -> TYPE: phoenix suns"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.119, 0.727, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_198", "image": {"bytes": "", "path": "./images/928ec908-ea23-42a4-8b13-3ca6f0721ead_9d934934-db77-4af8-89ca-56dfc9f9f1c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter search results for guitar tabs to only show songs with a difficulty rating of \"Beginner\"\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.166, 0.153, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_199", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_73fac390-4a7e-41a3-814e-caa47a3ad866.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.156, 0.928, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_200", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_fd9fe93d-6937-4027-89b5-20b0221d4c27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.242, 0.595, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_201", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_038b9509-b8b1-4e84-9426-a5377183ea28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.403, 0.123, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_202", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_d558e7ad-1abf-41ab-8a92-2e62cc399b43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.496, 0.47, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_203", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_41966dc2-1c46-44f2-89da-4e108a52dbc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.101, 0.5, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_204", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_e0a4ce5f-1ee3-4a27-a60e-5c7ca962277e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\n[tab] Alerts -> CLICK\n[link] red line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.267, 0.295, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_205", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_d8f45da9-b931-4adc-b980-61fc1ecf4943.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.129, 0.421, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_206", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_d79f9228-b3a9-418d-add9-33ed60d96f36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... 
-> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.181, 0.968, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_207", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_1d109036-07b4-4d9e-83e3-9ec6c93111df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.303, 0.069, 0.327] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_208", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_e165de37-91d8-4552-88cb-72773a2d61ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.068, 0.957, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_209", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_0cd2cdd2-052b-42c7-9e1d-f0bc04e54244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.177, 0.86, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_210", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_758a6b1b-74f1-42c3-84ba-bae21ea8afd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 0.077, 0.515, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_211", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_185b383c-4764-413c-94db-33a69434174e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.156, 0.562, 0.163] in the image\nAnd my action is Action: SELECT\nValue: 2 Guests"}]}, {"id": "mind2web_212", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_65943829-04b2-47f0-8962-29ec916f9463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK\n[span] Default -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.768, 0.493, 0.791] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_213", "image": {"bytes": "", "path": "./images/0592744b-ea69-4724-80f8-3924916b7758_021fde47-dd12-4ac5-b8f5-224b962a26ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out the cancellation policy\nPrevious actions:\n[link] Help -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.485, 0.945, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_214", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_57cbe038-dfd5-40a1-a06b-ac5867355b3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... 
-> CLICK\n[link] MONTHLY -> CLICK\n[button] See options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.11, 0.622, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_215", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_977bba29-aae4-4a39-b861-3078f910070e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK\n[textbox] CHECK OUT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.32, 0.594, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_216", "image": {"bytes": "", "path": "./images/52a8bace-f14c-41ce-980f-50d95e5ac259_39ed9e93-b3b0-4010-88bf-a0f716059c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of countries with the highest number of seen aircrafts.\nPrevious actions:\n[link] ADS-B -> HOVER\n[link] Statistics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.322, 0.437, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_217", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_71312214-c558-4c47-a70e-f32e1f74f9a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.66, 0.872, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_218", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_9de9bac3-ef0b-4042-b991-1b1dfc157d6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[select] Sort by Distance -> SELECT: Sort by Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.474, 0.32, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_219", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_df33373b-2ae4-4f6e-8f22-a1b84bde50a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.435, 0.394, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_220", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_f981234b-29f6-451d-b795-ad8216ee453f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.05, 0.117, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_221", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_03e8b74e-3cf6-4077-9339-84aefa9f9237.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.06, 0.181, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_222", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_a3c7b2db-75e5-41d9-a23b-b01d06ba008f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.007, 0.52, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_223", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_152064fc-85f5-4364-aa81-6f9a6fa9941c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.122, 0.094, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_224", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9c69f639-37d4-4a10-b271-a86ad3892709.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 24 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.267, 0.94, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_225", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e9c512bc-6241-4452-a4ec-3ad2237375a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.286, 0.99, 0.342] in the image\nAnd my action is Action: TYPE\nValue: Congrats on your new home."}]}, {"id": "mind2web_226", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_329bca7f-0638-4eff-83b5-50f793f10541.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.091, 0.846, 0.118] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_227", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_c33315e7-7e2d-4dc0-a06d-06ad4e82dbab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.234, 0.561, 0.258] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_228", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_30484980-8301-4a3a-ae0f-f2ea7df58336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.091, 0.628, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_229", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_561a3105-2605-4f2f-abbe-2b622948cf16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK\n[tab] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.109, 0.666, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_230", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_feefcb3a-19ea-438c-9bfa-b4c99631dcbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23\n[button] Search -> CLICK\n[img] NBA 2K23 - PlayStation 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.713, 0.975, 0.757] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_231", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_29164d9e-ba63-4d06-8c46-c482d44a416b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.233, 0.788, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_232", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_82b45e0c-6d98-47d6-9691-3d2d8a21abe6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK\n[radio] Minivans -> CLICK\n[button] Apply Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.365, 0.837, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_233", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_def2773e-0727-493c-916f-407e36da2dec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.216, 0.221, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_234", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9f7ab3a7-9b90-42cc-969b-9cd4d687d6a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.228, 0.517, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_235", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_3a0d5ccb-3636-49a5-898b-80a18673958a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living History event to attend in in April .\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.108, 0.93, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_236", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fefcbed0-8fa1-4592-88d3-8bdab9e18fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.186, 0.699, 0.202] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_237", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_c79b7700-6a68-429c-b616-d0151bd9bb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.293, 0.393, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_238", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_5feef698-f267-4190-a94a-3cc69cfae45f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.21, 0.261, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_239", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_39a1e694-4b56-4f10-845c-e3d03dc73e11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.288, 0.754, 0.311] in the image\nAnd my action is Action: TYPE\nValue: Thalia Hall"}]}, {"id": "mind2web_240", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_68aa9c94-69f6-4512-996c-58c416c098ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.037, 0.851, 0.054] in the image\nAnd my action is Action: TYPE\nValue: Mustang"}]}, {"id": "mind2web_241", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_4c00ab57-f61c-4d58-804f-25da94282151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.255, 0.438, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_242", "image": {"bytes": "", "path": "./images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_6d02d310-1f7b-45f6-b680-73edaeff707b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the release date and supported platforms for the game \"Elden Ring.\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.059, 0.047, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_243", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_28a61364-9726-4983-96f8-f68988ba8da5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK\n[label] Add -> CLICK\n[combobox] Select a list -> SELECT: Wish List"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.349, 0.781, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_244", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_1d412587-c49d-49e7-aa72-4b12e47b9c59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.009, 0.39, 0.027] in the image\nAnd my action is Action: TYPE\nValue: laptop"}]}, {"id": "mind2web_245", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_d5c61581-da19-4144-a5a5-540bb4ab10d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.716, 0.263, 0.724] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_246", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_63575c7f-68ce-498c-a1fe-1bfefa463455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.212, 0.205, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_247", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_66e0df08-da51-4405-a484-0d02219ec44d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.493, 0.255, 0.525] in the image\nAnd my action is Action: SELECT\nValue: 2010"}]}, {"id": "mind2web_248", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_10cae0d4-c04c-40c7-ace9-d4a0bd6a2993.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.006, 0.348, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_249", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_943a0122-f698-44fa-a09a-a51b0b364862.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.126, 0.199, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_250", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_241e6556-a874-4125-b694-5bc8b8bc4e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.321, 0.331, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_251", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_c829579a-b47f-4091-a86e-57467ac96607.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Alagnak\n[option] Alagnak Wild River -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.234, 0.73, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_252", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_247b7cf9-68ad-4c76-b6f9-179be7ce2b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK\n[link] RETIRED -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.29, 0.491, 0.315] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_253", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_6c2dbd75-a9bb-416a-a351-191e84c7897e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.22, 0.798, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_254", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_b9a3579d-b988-4a98-97d1-9bdc1abcfb2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK\n[textbox] Search IMDb -> TYPE: The Magician's Elephant"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.028, 0.657, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_255", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4463379b-ecb8-4fd3-a871-b3ba26ce27bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.644, 0.678, 0.672] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_256", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_ccb50965-fe39-45c7-8e51-1f00048585d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.369, 0.917, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_257", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_44c6392e-a186-4eaa-8760-eb0ab0f1688a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.157, 0.691, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_258", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_60083201-3aa9-4224-ba53-064f1337c834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.125, 0.825, 0.152] in the image\nAnd my action is Action: SELECT\nValue: Total Price"}]}, {"id": "mind2web_259", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a8b0b04f-7a57-4daa-9501-dcc668509760.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.282, 0.949, 0.314] in the image\nAnd my action is Action: TYPE\nValue: 1111111111111111"}]}, {"id": "mind2web_260", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_0727b0e3-b43e-4257-91fe-d0522d9f95ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.223, 0.434, 0.256] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_261", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_9cab69a1-12c1-4b8a-96b6-6677977b0efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.671, 0.477, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_262", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_2403f621-0e06-4828-bd85-e88920da6630.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Sort -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.21, 0.13, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_263", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_e4a42325-a654-487d-84c7-bf3df4ef3fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas\n[span] Las Vegas, Nevada, United States -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.821, 0.081, 0.831] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_264", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b66d3924-665c-4d00-89ce-3af5bbdefa0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK\n[input] -> TYPE: 200\n[span] Prices with Fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.272, 0.978, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_265", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a7113861-fe5b-4489-9bf1-74d8e911bdac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.027, 0.652, 0.048] in the image\nAnd my action is Action: TYPE\nValue: xbox series x console"}]}, {"id": "mind2web_266", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_26e640a9-4ea1-4d49-91b5-c85e6f60afff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.348, 0.314, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_267", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_73553ffc-fbfe-498e-bd3c-0f29651390e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK\n[p] Video Games -> CLICK\n[searchbox] Find values for games and more -> TYPE: Black Ops"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.201, 0.975, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_268", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_e0d490bf-0f05-41af-a25c-2c1607beb5f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.568, 0.504, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_269", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_68ac02d5-a995-46a4-91fc-cc364d6a9585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Pre-paid Wi-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.32, 0.943, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_270", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_eea2294a-9dc9-46e6-bffe-3a79e7bf7339.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.156, 0.362, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_271", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_a222958a-28e5-4650-a828-970e8418f440.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.004, 0.838, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_272", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_f8f537b6-6859-4811-870b-70ea8462e472.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.171, 0.438, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_273", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_01f9edf6-29ed-4d92-a014-f3130a29558b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.495, 0.557, 0.515] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_274", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_1012b462-c850-48cc-9aec-b52c613c9815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.008, 0.418, 0.025] in the image\nAnd my action is Action: TYPE\nValue: headphones"}]}, {"id": "mind2web_275", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_fd52e1f0-8e62-4dfe-87ba-46653af03edd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor\n[button] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.154, 0.18, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_276", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_2c0514e7-0198-47f6-9cc4-579b8d94d4b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.609, 0.546, 0.661] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_277", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_c2e21f02-9b52-4af0-a516-8ded4f5667d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.361, 0.666, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_278", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_59aefbe2-91fb-454b-9776-e882facf39e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[button] Go! -> CLICK\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.234, 0.664, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_279", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_fae28de0-5ad5-40f9-9957-f28a133d78ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK\n[span] Default -> CLICK\n[li] .15 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.729, 0.39, 0.775] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_280", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_131d877b-75a0-4877-af95-39ad3de38bd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.177, 0.583, 0.213] in the image\nAnd my action is Action: SELECT\nValue: LA"}]}, {"id": "mind2web_281", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_a053edd4-1209-4989-9b99-86fba90a1817.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing\n[textbox] Location -> TYPE: 90028\n[combobox] Distance -> SELECT: Within 50 miles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.717, 0.318, 0.779, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_282", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_b90bfd21-8292-4a30-b820-0e7294539949.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.29, 0.492, 0.309] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_283", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6661dc2d-7358-4707-b483-256abecef314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.214, 0.802, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_284", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b1b808d1-e980-4885-9add-57e9801759d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.326, 0.215, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_285", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_bcbd4fa2-dfa8-49db-9c12-836d1369cb1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing\n[option] vintage clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.14, 0.906, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_286", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_bbaa21de-6ebe-4cb2-b0f1-5f078bc25883.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.171, 0.385, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_287", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_ecf837ac-8efb-4881-ba8c-0468980e8236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arrive at 9:45 AM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.204, 0.359, 0.227] in the image\nAnd my action is Action: TYPE\nValue: brooklyn"}]}, {"id": "mind2web_288", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_6d2e17fb-6fa7-41b5-b3ad-e06febea811f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK\n[span] Price: low to high -> CLICK\n[button] Show filter modal size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.281, 0.382, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_289", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_dd9f943d-ec6a-4c26-ab14-1e616956da46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.191, 0.091, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_290", "image": {"bytes": "", "path": "./images/673841c2-de8c-4417-bdcc-dc48753a539f_2c2aec1c-480f-4da8-ab1d-5b869f7da824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the current standings for the western conference in the NBA and find the top team.\nPrevious actions:\n[link] NBA . -> HOVER\n[link] Standings . 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.619, 0.109, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_291", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_f2a963c3-0f8b-4bad-b7ab-4298c4bf0d48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.196, 0.124, 0.217] in the image\nAnd my action is Action: TYPE\nValue: 49102"}]}, {"id": "mind2web_292", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_9620c2bd-5128-408a-83d5-9cdbcb8313c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.877, 0.231, 0.918, 0.253] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_293", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_734546b2-50bd-404e-9a03-995d32d7a59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.117, 0.2, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_294", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_5e2410e0-53cc-4f38-9183-503099b46c3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.5, 0.289, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_295", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_e16396d8-5c1b-48e4-a7a1-5fbdea6617d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.28, 0.574, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_296", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_07dd93fe-4727-49e8-9b21-323de3c1d691.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK\n[option] Last Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.176, 0.616, 0.186] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_297", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_6838a03a-5a15-4837-801d-1217ce23b10b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.183, 0.249, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_298", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ae8a6811-3d44-443b-8bae-878a6f545432.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.296, 0.359, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_299", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d6d9d6a9-2059-43c5-8b87-7b7bf2cbfeb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK\n[label] Limited Stock -> CLICK\n[label] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.192, 0.09, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_300", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_a2427697-98ac-41ad-9fbf-861751daa293.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK\n[button] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.12, 0.573, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_301", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a1c39cfa-dd66-4ab5-ad95-670f09f04659.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.161, 0.121, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_302", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4fefd40e-5a97-42d1-968e-b429e4b4c5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.41, 0.812, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_303", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_70fd7564-5163-44de-b8cf-c90ccb96379d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888\n[combobox] Market: * -> SELECT: Chicago\n[textbox] Spot Address: * -> TYPE: 123rd st"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.503, 0.264, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_304", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_871a1cc6-f377-40b7-bb1e-aba28ec0787e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.047, 0.664, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_305", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_5fd3f332-ce1d-4724-b698-be9c1146adac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fare calculator.\nPrevious actions:\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.348, 0.305, 0.374] in the image\nAnd my action is Action: SELECT\nValue: PM"}]}, {"id": "mind2web_306", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_0e11dda9-eff8-4ab3-a636-a4a0237becdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.029, 0.054, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_307", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_9be4c700-75be-4c66-9202-8f31718ddabe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: Timesqure New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.391, 0.409, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_308", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_c3255b9c-7b61-4e73-a586-b21159ed70fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.291, 0.546, 0.315] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_309", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_6be71501-c895-4f3c-934f-16a21938dec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor\n[span] easter home decor -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.297, 0.974, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_310", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_f5829ff5-6294-41b7-b00b-3433d86971d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.258, 0.505, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_311", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_57a8bfec-2f70-49b8-b132-25569b94616a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single season.\nPrevious actions:\n[link] Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.46, 0.071, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_312", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_708acb3e-66cf-4976-83fe-0fc5a575f150.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.036, 0.148, 0.087, 0.163] in the image\nAnd my action is Action: TYPE\nValue: 49107"}]}, {"id": "mind2web_313", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_73c4da1e-dfc7-42c1-9b8b-493fc0048f3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.238, 0.226, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_314", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_879c0f11-6c7b-4133-b30d-ecbee152194d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.086, 0.259, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_315", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_6ac8ac5c-31df-4c8f-8093-13f539417457.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.448, 0.566, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_316", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_9fd1755f-24be-469a-8f05-55c07c1b34a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.196, 0.525, 0.221] in the image\nAnd my action is Action: SELECT\nValue: Family Trip"}]}, {"id": "mind2web_317", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_26ba29a5-6c06-4176-a682-02c044459b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.505, 0.28, 0.561] in the image\nAnd my action is Action: TYPE\nValue: addis ababa"}]}, {"id": "mind2web_318", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_35d8a6c9-4e1c-4b18-82fd-c4ee2821678f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.223, 0.226, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_319", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_708f72c4-9e63-4fc5-84d7-d89623a406d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.174, 0.296, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_320", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_344872cd-6715-4851-a8f6-01eaff065563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.355, 0.984, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_321", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_49c73a41-8cf0-4ec0-b12e-b588fa3a2320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.175, 0.338, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_322", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5155bd89-c99d-4e8e-8cbe-185618e319e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK\n[span] Select a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.206, 0.969, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_323", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_d522a186-135f-4d08-a07a-852004c505fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.007, 0.31, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_324", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_ea2e6ff2-b264-4578-aa3d-cd33be74b9a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.067, 0.367, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_325", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_a6f1e015-8967-4d33-b56c-4daf513b7396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.228, 0.278, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_326", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_73c5cb1c-f750-41f0-8bd2-ab89bf3b403c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.0, 0.445, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_327", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_4d0ed0cc-72ac-4a64-8ff7-3d5962f067fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.067, 0.788, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_328", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_8a1e5242-bcce-46ec-8ba5-e1aa2b723b33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... 
-> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.179, 0.704, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_329", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_d5320812-311a-480e-934c-e35760ef5bff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.175, 0.287, 0.2] in the image\nAnd my action is Action: TYPE\nValue: 43215"}]}, {"id": "mind2web_330", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_931c5aca-4b73-4e84-9797-1c93a3bd176b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.119, 0.332, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_331", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_7c298bc0-fed0-40a1-b15b-c6cad7071b60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.238, 0.359, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_332", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_70e49603-f22f-465e-b9b9-1344b4a905ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Stoves, Grills & Fuel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.543, 0.149, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_333", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_1f8f403a-057f-40c8-8f98-5ac2d1a46e9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.396, 0.605, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_334", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_15c7fb1a-57d3-453f-b3de-6cf368e782f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.203, 0.529, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_335", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d7ac76c3-c31d-4daf-ba91-f07a2250eb2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK\n[link] Under $75.00 -> CLICK\n[button] Delivery Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.218, 0.44, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_336", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_60dc39b7-782e-4aa3-836d-62fc57fe8819.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[heading] Continue -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.587, 0.642, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_337", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_395e5287-15fc-412e-8c91-356376438cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.561, 0.224, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_338", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_96892596-2a86-4978-8c2c-701040a4f9f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.057, 0.281, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_339", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_adbd43a1-3981-48e5-97e4-1f12fb54e667.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK\n[link] Full Team Statistics -> CLICK\n[heading] David Pastrnak RW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.104, 0.249, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_340", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_9b9bea77-138e-40d6-bebe-86d163d835a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.182, 0.418, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_341", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7e1d3bb9-5950-4300-ad90-dd0be92707da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.348, 0.913, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_342", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_0c03bd04-974b-4904-9ccd-9ec0e2152f29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.196, 0.124, 0.217] in the image\nAnd my action is Action: TYPE\nValue: 70726"}]}, {"id": "mind2web_343", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b9964889-17f7-4897-92d3-a2221740f0bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.17, 0.319, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_344", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_87e9c2d8-c6f0-42c3-8bfb-f1456f4699d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles\n[b] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.286, 0.777, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_345", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_12063f39-2856-480c-aa96-7e4eb94ffcc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\n[combobox] Search query -> TYPE: las vegas raiders\n[img] Las Vegas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.5, 0.976, 0.559] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_346", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_095d6d7a-0df6-4731-83ef-14e17d810b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.222, 0.937, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_347", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_59d223e5-8ce1-47cc-a614-ad75954151b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[div] Need an extra part? 
-> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307\n[button] 105307 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.431, 0.617, 0.508] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_348", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_a8277574-07e0-411e-8787-81cb20501c1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.216, 0.721, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_349", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_96779cdb-a229-44fd-848a-8522a105e38d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.185, 0.702, 0.22] in the image\nAnd my action is Action: TYPE\nValue: 2983 Marietta Street"}]}, {"id": "mind2web_350", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_eac1ae66-8d5a-48b1-b2b1-a0fd06cb5690.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.031, 0.669, 0.054] in the image\nAnd my action is Action: TYPE\nValue: 60540"}]}, {"id": "mind2web_351", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_88a36484-1756-4a0d-8e91-cf10a9abaa0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK\n[gridcell] Tue Apr 18 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.391, 0.187, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_352", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_c9b80332-9b10-41ac-b0ae-09330173af4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK\n[link] Recommendations -> HOVER\n[menuitem] Winter Preview: New Shows Worth Watching -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.175, 0.555, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_353", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_8c018b5c-efad-4b19-8fdf-607219a937e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.11, 0.369, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_354", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_2dfb62c9-c929-4cef-a5b5-ee1b8b9d7faf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.118, 0.129, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_355", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_f3e095cc-b31b-4f6b-83ab-677044140ff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.545, 0.645, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_356", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_641f627f-98e5-4b3e-a0b5-4bb370e16340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.121, 0.45, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_357", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_7337a4ab-f0d2-4e5a-9498-4f133b64972f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... 
-> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.366, 0.2, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_358", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_cbcdb41a-a319-4771-a94b-6c5348430bd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.15, 0.495, 0.17] in the image\nAnd my action is Action: TYPE\nValue: john"}]}, {"id": "mind2web_359", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_1aff894c-1861-4fe9-a936-bd4264f0c644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.108, 0.441, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_360", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_2fe12b3f-15f7-4cff-a0da-d485f189cb4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.042, 0.483, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_361", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_0180d34b-3ffd-44d5-ae82-c7e6b031c05e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.306, 0.373, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_362", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_442ce85b-f9ff-4a4b-8fc5-7c41fc303963.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.032, 0.169, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_363", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_97d43de2-df7c-4880-9368-ea38fa587621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.02, 0.335, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_364", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_fb8f3a51-6870-47b6-898a-25b1ebf691f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.205, 0.102, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_365", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_241522f0-f05c-49d3-89e1-e0db568af201.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.096, 0.215, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_366", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_dd18b502-ec91-4183-9051-0866c49b0936.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.108, 0.594, 0.121] in the image\nAnd my action is Action: TYPE\nValue: Avatar The Way of Water"}]}, {"id": "mind2web_367", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_c6d1a72d-6a78-4b55-a5e1-a7360cf50158.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100\n[textbox] Enter your pick-up location or zip code -> ENTER\n[link] Close -> CLICK\n[div] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.298, 0.493, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_368", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_c741a4b9-037f-4e8f-8a72-606fb1bcba61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.018, 0.535, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_369", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_26e80001-b32e-4aee-982e-5d3ff6fb21bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.206, 0.758, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_370", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf14f1d4-470f-4110-b3f4-019a9f7d0aed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.273, 0.512, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_371", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_d35e0d76-e5a4-478c-ae41-af9e27ffd454.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.296, 0.218, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_372", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_0edebd4f-be22-427c-84d1-2223ab345ef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.142, 0.965, 0.17] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_373", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_bdb13abb-d7a0-428b-94c5-64951f68f1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.386, 0.216, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_374", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_178bad0d-047f-4dc2-84ec-7f2a39924cc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.658, 0.223, 0.668] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_375", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_7ee2e3b2-d056-429c-a1c7-301f38f08660.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.123, 0.38, 0.153] in the image\nAnd my action is Action: TYPE\nValue: new orleans"}]}, {"id": "mind2web_376", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_672b06cb-3141-4330-b5c0-dfa51a37ba3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.238, 0.488, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_377", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_d1f6fe24-c802-40ec-9de5-9c81c57b69aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.094, 0.78, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_378", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_404af1a6-ec27-4b33-aa24-691486c2ec74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.68, 0.114, 0.699] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_379", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_b3189405-5a1b-427c-a196-d223b6799956.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK\n[link] Shop Vehicles Under $20,000 Link opens in a new wi... -> CLICK\n[button] Body Type \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.22, 0.196, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_380", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_98fb426d-f6ca-4336-8792-05ee6ea8b7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lave Hot Springs East KOA\n[list] KOA Logo Icon Lava Hot Springs West KOA Holiday La... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.225, 0.771, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_381", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_317a2951-ef4e-4a9f-bd40-18d345a63cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\n[link] Visit the See A Movie page -> CLICK\n[link] Coming Soon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.125, 0.285, 0.149] in the image\nAnd my action is Action: SELECT\nValue: AMC Artisan Films"}]}, {"id": "mind2web_382", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_353e5f4e-0dd3-4175-b341-462558576da0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.252, 0.332, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_383", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_83552bdf-be4a-412c-a088-0615ea08bbaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.091, 0.343, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_384", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b84d10b2-35ff-4c2f-9e47-89243f5d02b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.148, 0.352, 0.176] in the image\nAnd my action is Action: TYPE\nValue: 07718"}]}, {"id": "mind2web_385", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_c944c80a-9545-44eb-a901-aad8b0834d7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_386", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_68c8701d-311a-40ba-ad1a-482ee7d84c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.171, 0.249, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_387", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7d0e900d-c57c-45f7-a2fc-a41e9ae471a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.23, 0.617, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_388", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_dc03dd08-a61b-430e-97ce-1c37fec505ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. 
Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee\n[textbox] Recipient Email -> TYPE: scisoorbros@gmail.com\n[textbox] Sender Name -> TYPE: Jeerimiah Waton"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.745, 0.975, 0.779] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_389", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_338368ed-2c11-449d-ae56-e8726649f0ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.251, 0.442, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_390", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_ffc9b6af-f030-473b-8333-db8f3f2cf31f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.241, 0.581, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_391", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_942f53c6-7c06-488e-af27-0fefddaa6b13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.318, 0.506, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_392", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_63febe49-b818-4412-96ee-0589ed46caba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.055, 0.176, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_393", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_26b869aa-497e-4de5-82f9-4f7fc39977d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.409, 0.249, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_394", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_f34b6e67-a22e-4092-8304-c34b40b107e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.097, 0.321, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_395", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_f0138e48-b01c-4f47-81cf-41be44fa3298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: LONDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.229, 0.582, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_396", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_d0f7d055-29ae-4ebe-bd92-0c2e7ae2de4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500\n[link] Show 684 stays -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.276, 0.7, 0.287] in the image\nAnd my action is Action: TYPE\nValue: Togo"}]}, {"id": "mind2web_397", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_998b318b-b288-443e-9cd0-039f263ea2b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple\n[span] Kashi Vishwanath Temple, Varanasi, Uttar Pradesh, ... -> CLICK\n[path] -> CLICK\n[div] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.162, 0.721, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_398", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_1a75f8c0-4a9a-4c49-bac5-85a1ad22aecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)\n[select] All -> SELECT: Spanish (42)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.221, 0.196, 0.252] in the image\nAnd my action is Action: SELECT\nValue: Paperback (39,356)"}]}, {"id": "mind2web_399", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_81a0d494-3e16-47e6-ae12-96ca4d918431.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.195, 0.212, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_400", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_987291ad-91f8-4e77-80b9-343575d7813d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas\n[button] Complete -> CLICK\n[button] Continue Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.185, 0.781, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_401", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a2f6493b-5528-4aeb-a97d-602877298c51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.607, 0.32, 0.621] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_402", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_af9e9a40-200b-4453-83ce-3ff86dd64154.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.153, 0.31, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_403", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_6714117c-6959-4cbd-9ee2-9cd57f3d627d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.056, 0.97, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_404", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_93ff61b6-0bab-479c-9f06-45a4274258ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.57, 0.412, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_405", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_28771408-7ab1-41bd-9819-91f5781f65d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.313, 0.911, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_406", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_6a5ae849-09da-452e-8f6b-7757dca46690.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.227, 0.812, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_407", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_63bb767f-b11f-4830-8208-0ee804fa1842.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.29, 0.255, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_408", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_feb8be19-5f9b-44dc-a9fe-1467233b4677.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... -> CLICK\n[combobox] 2 tickets for HAPPY HOUR BURGER -> SELECT: 1 Ticket"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.493, 0.153, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_409", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_46f83ebb-597c-4df2-a715-6d17b102a7cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[button] All cities -> CLICK\n[button] Go! 
-> CLICK\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.193, 0.419, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_410", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_442e1ce3-2522-48da-b947-c9d0c670411f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.674, 0.194, 0.709, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_411", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_45f168e4-68f7-4a40-b1cb-50e2d47ed9cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.802, 0.081, 0.809] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_412", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d05d5797-f2d2-4046-b3b3-8f19e5bbd1f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.621, 0.322, 0.656] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_413", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_771645db-3909-401b-9e11-ec577982b6c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.183, 0.855, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_414", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_d82a81d4-25cb-48d9-921d-0cc1a8624a2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\n[textbox] Where to? -> TYPE: cancun\n[circle] -> CLICK\n[span] Trip Inspiration -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.055, 0.34, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_415", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_acf51d07-4630-4160-999d-f3ecfe8a47a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.224, 0.479, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_416", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_f1d36d0f-7896-44f3-bf48-6f9f9950416c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.529, 0.339, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_417", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_821343aa-9c06-4f3c-9437-15e55f522c11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.393, 0.783, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_418", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_69b85be2-186c-4be0-90bf-103fc674b6f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.189, 0.325, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_419", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_40a0c326-bcad-4edf-8b4e-6fb3af658ab1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.273, 0.048, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_420", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_15606cb6-9b40-427c-b76c-5f32223fafda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK\n[img] movie poster for Elvis -> CLICK\n[button] Rent from $3.99 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.311, 0.174, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_421", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_758947fe-0cd0-4b67-a5f6-62048e8f794a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.355, 0.249, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_422", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_db4a2ab6-e43f-4059-9dfa-0e4c7fb2eeab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.083, 0.263, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_423", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_d3e071fc-c039-46af-adab-d88fcba72fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.432, 0.194, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_424", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_34932950-fe34-4548-99d0-8a8726ddb9f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.404, 0.327, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_425", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_c89c55cf-e379-47bd-b0b4-a642ffe7be1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\n[button] Shopping -> CLICK\n[link] Trades -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.178, 0.598, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_426", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_973418b6-859e-479f-8d1a-b1a8fa9c5e51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.364, 0.468, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_427", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_83f96f84-7682-466e-a739-da6ce13c247e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.283, 0.919, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_428", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_e8b587ce-c3a8-485f-8455-bc7869669484.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.105, 0.517, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_429", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_272ad721-8eb9-4e16-b522-ec352a3edc47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.246, 0.904, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_430", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7713e50d-7086-49cf-a8ab-0cc3befbd494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.128, 0.25, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_431", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_57021f78-0a01-4b86-864a-5f427019edf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.422, 0.233, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_432", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_d1477074-827a-4194-a1bc-1c17e76b13c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: Davis"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.575, 0.456, 0.683, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_433", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_a58ae00f-38de-4c81-a24f-d32bd6933d7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.201, 0.5, 0.253] in the image\nAnd my action is Action: TYPE\nValue: Accra"}]}, {"id": "mind2web_434", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_1b586bab-28ee-4b81-96bd-0cce359c5989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] showtime -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.403, 0.355, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_435", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_5bab90f5-78c2-4d19-82ab-2f2aabc94fb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.134, 0.765, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_436", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_8528cae5-cd59-4742-b285-f1855866c552.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.389, 0.367, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_437", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_98e02390-fb4c-4887-9ec0-294167219c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK\n[div] 23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.001, 0.453, 0.273, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_438", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_cd7f794a-afd7-45b5-8d02-ed5fbce7caf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.61, 0.081, 0.618] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_439", "image": {"bytes": "", "path": "./images/fb7741f6-f388-4535-903d-d07315ea995e_e41eb015-80b3-45b5-bc29-c9f672f163ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find fitness events for this weekend.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.279, 0.252, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_440", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_5391b73e-2ea7-472d-bbdf-0978e4e0564f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Arrival station, Sheffield selected. 
-> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.153, 0.327, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_441", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_43bb051b-7c4e-4b20-921d-4555a8f353dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.074, 0.305, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_442", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_e6d800be-8004-45eb-a793-b15400c0ccff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.007, 0.87, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_443", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_54a253e1-012d-435c-8ab1-277ef327c33f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER\n[link] Barcelona -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.119, 0.093, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_444", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_52c8c1af-9bd2-4aa9-aeca-c781ccba7366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.645, 0.787, 0.673] in the image\nAnd my action is Action: SELECT\nValue: Chicago"}]}, {"id": "mind2web_445", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_76d2a3a9-6953-4102-b032-e0b0907c88e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com\n[combobox] Organization Type -> SELECT: Family Trip\n[input] -> TYPE: Johnson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.113, 0.777, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_446", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_10212b24-195e-48c9-acae-cb2350a78ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.194, 0.359, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_447", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_3d497426-f9d7-4f13-a176-e700575969ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.028, 0.128, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_448", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_0e295f69-6563-427f-9cb9-163bc4c61253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.192, 0.694, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_449", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_fac64ba6-630f-443b-ae2f-0afb8aac89bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.366, 0.153, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_450", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_464ce264-f475-4262-a089-2b8f06fc4f83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.137, 0.692, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_451", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_c45411de-bce5-415e-90d1-63f05c5810e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.244, 0.552, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_452", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_10e90e61-628c-4bc0-ab57-827dd4085228.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.103, 0.977, 0.142] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_453", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_3f2a8087-9586-4576-82d5-aebc1c19025b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.445, 0.336, 0.469] in the image\nAnd my action is Action: TYPE\nValue: new delhi"}]}, {"id": "mind2web_454", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_b60f4bf4-01db-45c4-99f2-28275b4807ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Certified fresh movies -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.482, 0.435, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_455", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_4a105819-7709-498b-a943-5e3a9eefdfda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam\n[generic] Vehicle Type * -> CLICK\n[p] Small to Full Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.25, 0.56, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_456", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_ae673ea3-b7f4-47ed-bf82-0f42f5cd51c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK\n[button] Departments -> CLICK\n[link] Wine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.259, 0.457, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_457", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_56a51491-e603-4275-841c-989da2b8d9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.142, 0.504, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_458", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4545e3b8-201f-4a29-b5e2-cd31dc104bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.24, 0.857, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_459", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_b802897f-2c52-42af-b317-321de287b5ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks\n[p] Tom Hanks -> CLICK\n[link] My Life in Ruins (2009) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.439, 0.518, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_460", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_09dd0172-dd00-45b1-95e2-61dd15cf2d11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.336, 0.33, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_461", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9b22d165-4722-428c-a980-3773ac46b8d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.17, 0.1, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_462", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_7abcc2cf-5142-4193-a68a-ccc119801db5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.074, 0.785, 0.092] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_463", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_e89bb795-2d24-4e2c-bcae-1294e3501dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK\n[link] Self-Rising Crust Uncured Pepperoni Frozen Pizza -... -> CLICK\n[svg] -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.231, 0.988, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_464", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_b40a4626-a9b0-46f9-b9a6-35d49a8fd0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK\n[textbox] Name of Recipient -> TYPE: April May\n[textbox] Email Address of Recipient -> TYPE: april.may@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.387, 0.938, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_465", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_afbb5253-41d5-4896-8fb9-a49db36fecf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.1, 0.753, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_466", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_24676627-e890-4aab-a9a2-aa595ee4e950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.214, 0.253, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_467", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_5f310105-9322-4ff9-befc-9e9ada33ba05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.077, 0.999, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_468", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_3d14978f-be25-44dd-b3f4-bf95d170d4f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER\n[link] Most Played -> CLICK\n[generic] By Current Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.143, 0.543, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_469", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_43f569ad-2a06-4a74-b77c-16acd0431fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.414, 0.922, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_470", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_2ec2117c-c501-416d-a25e-c24faef4c518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.361, 0.695, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_471", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_b4ba7775-0761-4026-ace8-47325c692364.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.316, 0.161, 0.345] in the image\nAnd my action is Action: SELECT\nValue: Daytime Only Parking"}]}, {"id": "mind2web_472", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_31fe500d-c8ce-4a15-a225-c86333e8826a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.527, 0.158, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_473", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_251bb8a8-5a58-4219-8fde-c24c613d4337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.214, 0.512, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_474", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_98a07d01-547e-46a7-a19d-843c7cef225e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.225, 0.843, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_475", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_9abb2493-8fff-4950-9b8a-d371af9516a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.234, 0.463, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_476", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_6505d42e-3973-4cb2-9d59-b7fa6513d6c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Last of Us"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.029, 0.194, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_477", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6c2838cc-2eba-4e57-ba2b-91edc2804240.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.086, 0.322, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_478", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_583fc711-90e0-4363-ac48-057b547a3a33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.155, 0.627, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_479", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_60d9f7d8-adde-4e1a-8763-46cf48b62328.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.141, 0.271, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_480", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_acf2309a-3542-43c5-a8cb-3fef021a5c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK\n[searchbox] Please type your destination -> TYPE: udupi\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.555, 0.805, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_481", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_cdaff30d-7164-4b7a-b1e2-a95d33da9282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.008, 0.867, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_482", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_ab2175cb-f9af-4c04-a557-c9671e492e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Accra"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.309, 0.72, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_483", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_7672056b-5964-4cb7-95fb-579dec1a1d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.078, 0.129, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_484", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_aaa64149-aeef-4b01-9d53-323f0c6357b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.096, 0.402, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_485", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_b3044a24-aa62-41ae-a42c-b6ab256132f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.24, 0.573, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_486", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_cb226f96-18d8-4dd2-bad2-d38a23094374.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.733, 0.391, 0.748] in the image\nAnd my action is Action: TYPE\nValue: RA@gmail.com"}]}, {"id": "mind2web_487", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_384d2cb5-1500-4cfb-b973-ad828bf541fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.72, 0.087, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_488", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_0f9aceb2-5773-43c0-883f-c3e0ab76df13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. 
-> CLICK\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.399, 0.204, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_489", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_04bab092-de15-4b34-8d45-a444c6e6b1b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.405, 0.184, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_490", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_689efb16-8afc-4054-ae22-289fba6674b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.296, 0.486, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_491", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_c99f7ced-46a0-4187-a323-0ce345af5b76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.005, 0.204, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_492", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_72aaf637-cf0b-4762-beb3-e4cdfe50dbf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.173, 0.266, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_493", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_c07dee8e-5b45-432f-80b3-c79f3ff2f1d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK\n[button] Follow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.202, 0.375, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_494", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_1329c041-da50-44be-9694-0a50b5a51d2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.366, 0.089, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_495", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_89cbd56a-b983-4a06-afa8-cfd121dd0ddd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.712, 0.281, 0.868, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_496", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_43a17e87-29ed-4e79-8b90-ede9013a6030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.177, 0.429, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_497", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_46a981ea-d3c6-42fb-9d9e-3cc0f679b56d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Bus Fares -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.245, 0.576, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_498", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_4908afca-3881-4d5f-bc9c-d1bd00895602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.234, 0.497, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_499", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_9b7e1386-5ab5-46e3-8739-701d711d5059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK\n[select] English FA Community Shield -> SELECT: UEFA Champions League"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.302, 0.26, 0.321] in the image\nAnd my action is Action: SELECT\nValue: 2022-23"}]}, {"id": "mind2web_500", "image": {"bytes": "", "path": "./images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_45ac5967-b39a-4abb-abe9-314ab611fcc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare Pro Plans with other plans available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.0, 0.702, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_501", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_d7aae626-e02f-4f67-a2ed-100574db0121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.314, 0.711, 0.34] in the image\nAnd my action is Action: TYPE\nValue: Johnmark@gmail.com"}]}, {"id": "mind2web_502", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_6b02e5e0-bda2-415d-9468-9796ce2ad2b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.419, 0.469, 0.449] in the image\nAnd my action is Action: TYPE\nValue: Ohio"}]}, {"id": "mind2web_503", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8f3e61f6-be1e-4fbc-b01c-904f68a74086.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[gridcell] Choose Tuesday, April 25th 2023. It's available. 
-> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.389, 0.497, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_504", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_ae0e503c-2e94-4e89-92e5-a385c1434d50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.1, 0.777, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_505", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_77d2fade-b9c9-46c2-b41b-81e8bc671d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.057, 0.303, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_506", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_d0ed97a5-260e-43f7-b268-72fa521ff5a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK\n[generic] 600 W -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.233, 0.451, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_507", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3f6e79c5-fb1f-41c7-be6c-53bedd7bd544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.196, 0.754, 0.221] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_508", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_d47cbb1c-7d70-445f-a145-f4af8c2e35f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Demo Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.452, 0.667, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_509", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_c53a794a-53ea-4564-b4f3-5ef7c0279bab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK\n[button] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.133, 0.517, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_510", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_bac428a8-a55a-4c2d-a416-51ae11d42509.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK\n[tab] Features -> CLICK\n[checkbox] Wi-Fi Capability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.203, 0.757, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_511", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_a695dfc5-ea9d-4bb7-8efb-45d3aa1f8928.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK\n[spinbutton] Gift Card Number -> TYPE: 1000000000000000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.276, 0.5, 0.304] in the image\nAnd my action is Action: TYPE\nValue: 1222"}]}, {"id": "mind2web_512", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_83a425dd-09e4-4a42-b6c7-440c00333fd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... -> CLICK\n[button] $100 -> CLICK\n[textbox] To -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.1, 0.968, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_513", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_062da2e4-9c43-48c4-898f-1ef4b05a7542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\n[link] Fantasy . 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.08, 0.725, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_514", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_06935ea2-746d-401d-8d7f-39e882db3cd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[link] Restaurants -> HOVER\n[span] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.022, 0.564, 0.035] in the image\nAnd my action is Action: TYPE\nValue: SAN FRANCISCO"}]}, {"id": "mind2web_515", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_3d430b08-3b31-40be-966d-0ebc25c0e439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.134, 0.824, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_516", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_ba9f1b5e-9f7d-4890-b949-fab2446b19cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.26, 0.606, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_517", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_fa456b75-d802-4cb8-a122-24ba577812f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK\n[heading] Apple iPhone 12 Pro - 128GB - All Colors - Unlocke... -> CLICK\n[combobox] Please select a Color -> SELECT: Blue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.205, 0.737, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_518", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_30031b64-c4c3-4741-a338-9de86a7bd529.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.244, 0.746, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_519", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_f7c781d4-9856-4f3e-b227-20e1cfe0a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM\n[combobox] End Time -> SELECT: 5:00 PM\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.496, 0.372, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_520", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_68c477a3-d1b6-4c90-95e1-e78aa128bf1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 
1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.343, 0.656, 0.388] in the image\nAnd my action is Action: TYPE\nValue: Davis"}]}, {"id": "mind2web_521", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_28459c7a-e656-4f30-946d-53f528631e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.226, 0.395, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_522", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_bf489b94-792e-475d-aa34-32cdcda0f2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.262, 0.868, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_523", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_3b050be4-1d7e-43f1-a584-1bf2ce238aa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK\n[link] Gaming Mice -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.081, 0.979, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_524", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e9c074e0-eb15-4d22-92b6-f703bb5da185.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.212, 0.943, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_525", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_78a55844-7ec2-4b4d-9a58-e35d37ef18e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith\n[textbox] Email Address * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.541, 0.557, 0.561] in the image\nAnd my action is Action: TYPE\nValue: 6157075521"}]}, {"id": "mind2web_526", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_2bd4c20d-4f07-4507-a6c5-9cf1b634a4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.048, 0.106, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_527", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_98776082-1913-404c-8a5b-ff56c03291c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.421, 0.163, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_528", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f2aa38c8-10e2-4a9a-8305-480422409dd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.54, 0.512, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_529", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_ff4a9a5f-bb9a-4fe4-ac0d-1b6e7ef9a46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.125, 0.4, 0.157] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_530", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_e58c6cf0-b67e-459b-bdad-9bfe55c453ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.112, 0.041, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_531", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_b9ee8eb8-3b77-4ec0-9278-a65267b9cc50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.324, 0.894, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_532", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_0d3bb8c1-0174-43bb-ba64-b4f5d4392c7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[button] Find Schedules -> CLICK\n[img] -> CLICK\n[span] -> CLICK\n[button] Close -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.393, 0.296, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_533", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_875a9aaa-9e35-4575-868b-6dd03d6ca8dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.087, 0.487, 0.117] in the image\nAnd my action is Action: TYPE\nValue: Neo"}]}, {"id": "mind2web_534", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_d8706414-226d-4656-b7d5-818d440c9c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK\n[link] One-Pieces -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.388, 0.256, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_535", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_fe2329c4-61b1-43e9-9ef6-52d2ee4bdd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.075, 0.559, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_536", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_a2ff1967-a42c-486a-9a4b-356fc3d1f590.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.354, 0.181, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_537", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_d2dc023e-7146-43f3-88e4-dd00ce65a2f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.272, 0.568, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_538", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_67c0685a-9f53-46c3-9842-d416f890ea25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.177, 0.846, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_539", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_2ca80415-2ee3-421c-b26e-662116f8f61c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. 
-> TYPE: manchester\n[span] Manchester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.14, 0.206, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_540", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_ad1a48fb-4b8a-4ea8-8945-04d8b57dd201.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107\n[div] 49107 - Buchanan, MI -> CLICK\n[button] Request Appointment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.4, 0.333, 0.438, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_541", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_2fe0da29-0224-4d57-8ca7-f203f4ee7f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.097, 0.387, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_542", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e8302760-d313-4bd7-9f3b-c38819b7d97d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK\n[checkbox] $5.99/Day$1.38/Day -> CLICK\n[checkbox] MARKET -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.351, 0.93, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_543", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_4f8da365-0e3a-49aa-a1d5-32e0ed17259d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK\n[span] Search all Parks -> CLICK\n[li] Acadia National Park -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.123, 0.727, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_544", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_8c85d1f6-5d5f-4b7f-8ad8-8fcdb58ca94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.191, 0.196, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_545", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_60f12b9c-9c86-4d52-986c-d66d26ff9ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.309, 0.269, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_546", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_6bed6fe2-2ce7-47ef-9b12-9a8f308a3102.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 
1 weekly charts ranked artist\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.407, 0.206, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_547", "image": {"bytes": "", "path": "./images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_0fffa0bb-9a4e-48b1-9023-298c7a5829c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending daily deals.\nPrevious actions:\n[link] Today's Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.549, 0.567, 0.562] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_548", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_512c2744-f31b-4206-98c6-f69312994a72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.014, 0.553, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_549", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_728b67b3-d076-4667-afca-854c4864e209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.097, 0.777, 0.122] in the image\nAnd my action is Action: TYPE\nValue: Crew"}]}, {"id": "mind2web_550", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_0938ce44-198a-4d1f-a88a-c26cd07e7a2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.459, 0.575, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_551", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_f235354f-5877-4b33-82b4-dd854cf552a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.387, 0.085, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_552", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_4f7e3555-112c-40b1-b45e-a729bb210f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.177, 0.755, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_553", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_0b65497e-6dbe-4d02-b48d-0662c365c294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.583, 0.262, 0.68, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_554", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_42319d5e-a274-4be1-a41e-e97ed6615952.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.004, 0.675, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_555", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_cea8fdc0-7489-497d-b118-515681b710bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.476, 0.235, 0.506] in the image\nAnd my action is Action: SELECT\nValue: 2017"}]}, {"id": "mind2web_556", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_11949dd6-6d1a-42e9-a965-3bad963bac16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.393, 0.355, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_557", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_9bb8527b-4f0f-4adb-a232-baaaf881902d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK\n[combobox] Platform -> SELECT: PS5\n[tab] AUG '23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.284, 0.523, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_558", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_8ae64ebc-8538-42aa-bd7e-f0675af9c375.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK\n[combobox] Find a store -> TYPE: 60540"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.092, 0.668, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_559", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_4231bc71-9555-49ec-8edf-0e46843f0832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.054, 0.388, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_560", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_ef7e1558-a90e-4187-81be-290734f69625.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[path] -> CLICK\n[div] 4 -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.109, 0.963, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_561", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_1c12e058-d63b-4514-bba9-ca7c1cec49d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.861, 0.611, 0.879] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_562", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f11eadf6-b789-4a4a-94d3-46613bffdc98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_563", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9db12986-1e76-4bff-80d8-6fd5ec3fb7b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.095, 0.448, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_564", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_d80603c1-f854-4923-ae8c-dae6003a5cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.323, 0.27, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_565", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_2bb14193-cad0-433f-aa68-3def5ba090a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.173, 0.495, 0.2] in the image\nAnd my action is Action: TYPE\nValue: 4533234565"}]}, {"id": "mind2web_566", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_926471db-6655-45d2-9182-4af24f614ad2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.209, 0.728, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_567", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_a634663a-b496-4ead-94e0-e2c1a1f4b86a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER\n[link] Barcelona -> CLICK\n[link] Fixtures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.508, 0.138, 0.534] in the image\nAnd my action is Action: SELECT\nValue: Spanish Copa del Rey"}]}, {"id": "mind2web_568", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_0dfd6898-ef1c-4b83-9abb-6fb4630af976.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.099, 0.181, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_569", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_55015e6d-ec84-41ae-99cc-1c8298eba5a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.126, 0.352, 0.15] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_570", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_ac979c95-d410-4b40-83b9-32caefbe0fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Flights -> CLICK\n[b] Columbus -> TYPE: NEW YORK\n[span] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.228, 0.617, 0.269] in the image\nAnd my action is Action: TYPE\nValue: TOKYO"}]}, {"id": "mind2web_571", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_e47414c2-b553-4232-82d6-5172de9eb75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.064, 0.387, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_572", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_53a3e30c-6fe2-4f5a-b132-8390e74be073.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK\n[button] View 95 Vehicles -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.709, 0.048, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_573", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_83815d26-fe3f-46de-8fdf-b8d347a78e50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.038, 0.36, 0.051] in the image\nAnd my action is Action: SELECT\nValue: Airport"}]}, {"id": "mind2web_574", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_6b487bd0-fda3-43e7-8adb-45fd77815a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\n[link] Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.409, 0.189, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_575", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_25595676-125c-4c39-8a05-9d86e9f3b5a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.194, 0.711, 0.24] in the image\nAnd my action is Action: TYPE\nValue: Mark"}]}, {"id": "mind2web_576", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_82d93f55-8572-4c68-8aa7-982b1774b04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.122, 0.966, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_577", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_39ab3869-5aa7-4ee7-b1f2-d2e182997e3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.042, 0.589, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_578", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_36632f49-e9c1-4dbc-866d-eb03522d0614.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.103, 0.702, 0.127] in the image\nAnd my action is Action: TYPE\nValue: PARIS"}]}, {"id": "mind2web_579", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_f32216ff-a9d3-426b-ad6b-0081850a3db0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.276, 0.196, 0.307] in the image\nAnd my action is Action: SELECT\nValue: Hardback"}]}, {"id": "mind2web_580", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_89ea3db5-1984-4912-a93a-8cdb9b2402af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\n[heading] Flight status -> CLICK\n[textbox] Flight number -> TYPE: 1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.275, 0.478, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_581", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2a7a1bbf-df80-4b6f-a57f-fd754cc16db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.183, 0.592, 0.201] in the image\nAnd my action is Action: TYPE\nValue: Shubert Theatre"}]}, {"id": "mind2web_582", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_7f571a32-da4f-48e1-b26c-5c5b412dca5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK\n[link] GLAMPING \uf0da -> CLICK\n[link] glamping near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.314, 0.141, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_583", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_becee9e3-be6c-4d01-b62e-3b2e23d3413a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.351, 0.686, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_584", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_84502966-8969-4b4f-bbef-370c2f4e62bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.313, 0.486, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_585", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_6baa7eb5-d650-4920-80c4-bfea64397c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.182, 0.957, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_586", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_285f5467-2cad-4f8d-8b01-8f90a80e3cce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.412, 0.963, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_587", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_ec2b8835-2edf-4769-a89a-5c36d204ee52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017\n[combobox] Select Maximum Year -> SELECT: 2017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.212, 0.226, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_588", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9ce95ce5-01c8-4a7a-87a4-aae8193cd6d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.35, 0.322, 0.387] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_589", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_2566660a-da4f-4da5-979a-0ffb4953d972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.175, 0.784, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_590", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9bb86454-f3c6-453e-b06e-70f28ec3d09a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: Gingerbread cakes"}]}, {"id": "mind2web_591", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_a645bce8-e7b5-44ef-99b2-045410868809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.13, 0.894, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_592", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_c6cbec5e-1b4e-4c9b-bbc5-c0d55a1968e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Employer Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.652, 0.685, 0.685] in the image\nAnd my action is Action: TYPE\nValue: Gua AB"}]}, {"id": "mind2web_593", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_b178005b-95d0-4ad2-9d7a-fcf68844cf09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.41, 0.135, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_594", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_09862727-dffe-4e83-a678-d29962c98d92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.237, 0.567, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_595", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_e2dc345b-7f76-4518-9f6b-1e75f62a4fa5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.49, 0.339, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_596", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_a40aa80f-344a-4d08-8333-4778e7549172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.105, 0.181, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_597", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_7adc528f-60dc-477b-8cb8-77ad576f840b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.126, 0.913, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_598", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6592370e-2797-4a62-9b33-9769fd75aa37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.179, 0.105, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_599", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_04339d0b-8754-454b-b068-ce03b1f45f86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.165, 0.609, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_600", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4b926a9c-fd06-48cd-b8dd-62a5b7d509a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789\n[combobox] Expiration month \u00a0* -> TYPE: 01"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.66, 0.492, 0.677] in the image\nAnd my action is Action: SELECT\nValue: 2024"}]}, {"id": "mind2web_601", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_1bbbf339-0ff9-4326-b590-da7e3b92be27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.41, 0.691, 0.427] in the image\nAnd my action is Action: SELECT\nValue: June 2023"}]}, {"id": "mind2web_602", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_2e04274f-f7b0-447f-a96d-7094c9e50f25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK\n[link] Auction -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.088, 0.792, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_603", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_66efeee7-26da-4ccc-a8eb-1cb6b87f7b7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.599, 0.141, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_604", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_b77ac57b-8075-4c8d-8104-6551fac80aa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.311, 0.463, 0.321] in the image\nAnd my action is Action: TYPE\nValue: cancun"}]}, {"id": "mind2web_605", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_91398ccf-62f2-4b00-99e8-538f8dc83ff1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.309, 0.173, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_606", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_79195bd7-6e14-43c7-818b-83aa994a0f60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK\n[button] Select Nissan Kicks Vehicle -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.31, 0.951, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_607", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_671005f8-0412-404e-b398-ad5476ea00cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.052, 0.913, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_608", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_1553af30-7f22-4c4d-8037-4ac6c3a3b72b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.355, 0.508, 0.392] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_609", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_d99edb30-3e73-492f-ab89-8e248147726d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK\n[listbox] Select railcard 01 -> SELECT: Veterans Railcard\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.277, 0.327, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_610", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_9e41be5e-d1a9-4ae3-82bb-2d9cf7e3fd22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: creed III\n[div] Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.166, 0.896, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_611", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_db9f5f4a-cfd7-4ca7-9dd7-e73dd9314048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.139, 0.679, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_612", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_59857729-5631-4bd5-be03-ae871b6c7549.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.043, 0.091, 0.173, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_613", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_a684c98a-c238-4bef-b2ad-f476f07d73f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.677, 0.458, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_614", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_9d995e9a-9209-44b2-995e-c789e80640fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK\n[span] Add -> CLICK\n[link] View More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.22, 0.367, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_615", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_a73e46ae-d077-4494-bbb6-3e900105e7b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75\n[span] mm/dd/yyyy-mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.218, 0.312, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_616", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_5e1367e4-40be-4bd7-a0e7-0f4cea0043e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.844, 0.273, 0.93, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_617", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_98fac87d-a77e-45a6-be35-d7582402efd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.183, 0.211, 0.204] in the image\nAnd my action is Action: SELECT\nValue: 100 Miles"}]}, {"id": "mind2web_618", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_05ee4572-3449-45bc-81de-0ca98ab19c32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.358, 0.751, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_619", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_09094d31-83aa-4538-842f-a3d990b2c0f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.403, 0.336, 0.432] in the image\nAnd my action is Action: TYPE\nValue: Venice Beach"}]}, {"id": "mind2web_620", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_1bc3cce0-b1be-4e81-8248-4525ffd46b09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.69, 0.329, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_621", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_e4a9cbd4-088d-4619-bea8-f2012f168a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.064, 0.176, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_622", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_a0d343ae-e59d-44b8-abfb-e1ed3c0df2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.051, 0.188, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_623", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_585b6e77-b0da-452f-b0c9-97e223fc786a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.199, 0.377, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_624", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_60a55e3d-54ea-4570-8fe0-92972c015964.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.08, 0.421, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_625", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_28857cab-172b-4651-b610-831598ecf7e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.05, 0.06, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_626", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d25fbaa1-54f2-4aaa-9446-6f113794dfc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.341, 0.926, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_627", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_bbe959f7-08e3-4dfd-b80b-b837caab3e9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... -> CLICK\n[button] 8:15 PM Table -> CLICK\n[button] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.325, 0.523, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_628", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_e9787ce7-b544-442f-bfc9-3c56c68ad182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.141, 0.851, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_629", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b07896e7-2e85-4045-9080-9134edeafe41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_630", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_47c0995c-7238-4f67-8bfd-dcb9ebad4a86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.198, 0.473, 0.217] in the image\nAnd my action is Action: TYPE\nValue: 5000"}]}, {"id": "mind2web_631", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_08af7cdf-e95d-4875-a679-c15c9c08e85b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.02, 0.652, 0.035] in the image\nAnd my action is Action: TYPE\nValue: musical instruments"}]}, {"id": "mind2web_632", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_dd0d509f-3050-4610-baa3-cd8f57e8ab83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK\n[combobox] How many guests? -> SELECT: 4 Guests\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.381, 0.596, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_633", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d1c3e4dc-571d-4ee7-84e2-6751f69713c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.209, 0.176, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_634", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_507573e9-3eef-41c7-833f-a9992b520d5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.185, 0.536, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_635", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_88a2761a-0c07-43a1-b931-1b8f81bb2cea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.019, 0.68, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_636", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_fb0bb348-ec09-4106-a0c6-5072cb5a070c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK\n[textbox] Select End Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.187, 0.173, 0.201, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_637", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_8ed5187f-287c-40c5-9ca9-9a142fb87136.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA\n[input] -> TYPE: Walker"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.581, 0.347, 0.902, 0.387] in the image\nAnd my action is Action: TYPE\nValue: A987654"}]}, {"id": "mind2web_638", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_98de417c-491c-4137-a8d7-1071dc6e1f4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.443, 0.32, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_639", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_5f599e93-da0c-4046-a99f-5ee9b6b91c4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.563, 0.667, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_640", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_80ec2d6b-ccfc-4134-88b5-e9ae4967039e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.138, 0.36, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_641", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_0a22a18c-983e-446f-871f-d2fd71f2c9ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.198, 0.795, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_642", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_cfaf73bf-07fa-433e-a651-8c1c4a4a633c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.257, 0.478, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_643", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_e390e8f2-c563-4082-b220-e8544fd3f37d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.171, 0.249, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_644", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_9af5e2bf-542b-482b-b479-2cdead789a25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.208, 0.843, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_645", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_853848fa-4225-4177-814e-f29de4ed4f5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.405, 0.855, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_646", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_289cdf09-4bfc-4ba0-affb-bf06c4b47593.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.223, 0.643, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_647", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a3853221-cf8e-4f1e-a89b-0d1e3fd620b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.012, 0.984, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_648", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94fc7a4-d560-4fa5-a4ab-7c97572032f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.426, 0.349, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_649", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_f3dc63f7-80a7-4979-99bd-7bda15aee89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.154, 0.657, 0.182] in the image\nAnd my action is Action: TYPE\nValue: Ohare, Chicago"}]}, {"id": "mind2web_650", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_17ca5c93-6288-40dc-9018-691a97dc13d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.129, 0.73, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_651", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_a229ef07-963b-4d03-aacc-695b5b923058.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.333, 0.554, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_652", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_c7e2d653-d028-48ff-987d-7d48b3fc1bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.62, 0.691, 0.64] in the image\nAnd my action is Action: SELECT\nValue: June 2023"}]}, {"id": "mind2web_653", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_f00afcea-c60d-4746-b705-8abad5a4a61c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.455, 0.688, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_654", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_9732a0c7-bde5-479e-b4db-527fa1212bff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.525, 0.641, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_655", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_e24783c6-df28-49de-a73f-cdf3cf4500a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.112, 0.331, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_656", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c0bd78c0-c5b3-4607-9f24-fa07181701a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.23, 0.359, 0.256] in the image\nAnd my action is Action: TYPE\nValue: empire state building"}]}, {"id": "mind2web_657", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_f083d98e-f278-4e39-9c59-c02d95e8dd2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.126, 0.321, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_658", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ca2a9f0f-84e9-4e41-9462-32f12264b4ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK\n[button] Add to Wish List -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.925, 0.136, 0.947, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_659", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_6506aee3-cdd1-4f39-b9ff-4968228cfcda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.789, 0.96, 0.823] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_660", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_1d37262b-2901-4468-bb2f-f5a9dd9e95b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK\n[button] Done -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.774, 0.573, 0.957, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_661", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_798b286b-adb3-4c20-b60c-f9d140ca52ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK\n[link] Shop Vehicles Under $20,000 Link opens in a new wi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.382, 0.225, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_662", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f3a45444-8db6-4965-b692-96e995ab489b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.016, 0.211, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_663", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_66ecd249-ed96-47a9-9e83-29e6d273fb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.434, 0.863, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_664", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_88859090-cc5c-4b82-b5cc-3a7c2cce4f4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK\n[checkbox] 4 stars rating -> CLICK\n[radio] Lowest price first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.48, 0.791, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_665", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1bf42320-592b-4bfb-8141-a292892eb093.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.223, 0.3, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_666", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_60b25c01-6a9c-456e-a2de-296e7090b8c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.557, 0.097, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_667", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_9912f695-1f04-491d-bcb4-dcc99b5eb3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.147, 0.769, 0.176] in the image\nAnd my action is Action: TYPE\nValue: po box 2846"}]}, {"id": "mind2web_668", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_0bfd0d38-184d-4d8a-9764-9b845095d0df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.098, 0.475, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_669", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_92db244d-ca13-4885-8d45-87f3df9a87c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.269, 0.723, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_670", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_22ad9507-f8b7-4f15-bd7f-c0f99312acd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[div] 4 -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.543, 0.181, 0.815, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_671", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_769575d0-1e94-4299-9e76-4b79f5704861.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\n[link] Visit the Food & Drinks page -> CLICK\n[link] Explore Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.748, 0.172, 0.782] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_672", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_7322835c-5bca-4b29-a680-c8d122209a40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! 
-> CLICK\n[link] Rides & Experiences \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.206, 0.654, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_673", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_31f0c71f-7a90-4fa6-beac-319af1442002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.318, 0.403, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_674", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_ed7b5274-aae9-47a3-8b14-63e67b3f171c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK\n[combobox] How old are you? -> SELECT: 18-25\n[combobox] Who do you usually travel with? -> SELECT: Couple"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.186, 0.941, 0.219] in the image\nAnd my action is Action: SELECT\nValue: Yes"}]}, {"id": "mind2web_675", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a17e5768-0ac8-450e-af82-4b7c2656c3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.113, 0.703, 0.129] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_676", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_48650296-30f6-4c10-90bc-b65a4f8d92c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.089, 0.292, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_677", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_bc74f169-259e-446a-a63b-77e3f990d729.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.418, 0.182, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_678", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_005bc9e7-3f90-4be3-9512-4e6c3fc9517d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.439, 0.29, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_679", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b0a24212-9aae-4fbc-a62d-bc7129890aec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. 
New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.145, 0.782, 0.172] in the image\nAnd my action is Action: TYPE\nValue: red sox vs yankees"}]}, {"id": "mind2web_680", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_d8aed545-1860-46ac-a290-ce24e2ee12b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.283, 0.639, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_681", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_bac24a0f-8c3f-47d7-8870-0facc3b7352b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[combobox] SORT BY -> SELECT: Price: Low to High\n[button] Select Mitsubishi Mirage Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.169, 0.951, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_682", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_b068a66c-b3fe-4991-a9c1-b534eac1c4ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.021, 0.466, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_683", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_1d0ce156-c4ff-462f-9503-71e97ddc7bc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.259, 0.974, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_684", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_85e67d52-dbf2-4548-bc57-7030b7d926c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.316, 0.494, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_685", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_ade4eacb-a963-445c-bb0d-c025a8ac3b47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.372, 0.359, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_686", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_4c7b25a2-d944-488e-ab04-8558592e50ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.0, 0.557, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_687", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_db04e65a-c4bc-47b4-90cb-2a233cee4a12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.153, 0.647, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_688", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_7f72659d-d09a-4ec5-8d21-174f5ad2b87e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.318, 0.777, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_689", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_cf49630d-7148-4457-b45c-0bc7ccde4df7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.142, 0.875, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_690", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_52d7c938-aadd-4349-b61b-4db12d69371b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.039, 0.391, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_691", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_17bb4834-cbfd-40cc-84dc-8a06cb5be3d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK\n[span] Show -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.62, 0.158, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_692", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_f9eba4a9-dc63-44b8-9382-822cac46e582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! 
-> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.31, 0.176, 0.359, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_693", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_2193b3f2-8f8a-4bca-b688-831462294ca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.053, 0.441, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_694", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6daab355-c31f-4f01-9790-b621f663409c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.243, 0.438, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_695", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_efb82e4d-b2c7-4b75-9125-34401b88bb10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.373, 0.746, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_696", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ea2935b1-0eeb-4873-985f-fcf52085b341.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.327, 0.816, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_697", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_4fdc91b7-4ecb-4279-81f3-3e53e6e92071.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? 
-> TYPE: SHANGHAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.301, 0.448, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_698", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_2edb2388-3a5b-444f-9824-2bd1e69cbf18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.401, 0.106, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_699", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d3e3c95-6cc7-41d8-923c-a543635c2643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.289, 0.045, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_700", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_752e0eed-61ec-416e-a42d-7313f6820f5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.228, 0.647, 0.596] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_701", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_5874954f-7c2e-432e-bddc-1a6028f60421.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.007, 0.211, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_702", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_20e92a74-f8b4-4d13-b636-5de220b1d2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.026, 0.931, 0.042] in the image\nAnd my action is Action: TYPE\nValue: Michael Jordan"}]}, {"id": "mind2web_703", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_43599216-5ae3-4012-bc6f-4583b95a4523.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.288, 0.377, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_704", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_71404d12-c5cf-47c9-8128-8390e15252db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.11, 0.492, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_705", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_653e6f08-8ac9-495e-94e1-9f6fcda996e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK\n[textbox] Drop-off location -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.142, 0.743, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_706", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_47a2fbbb-9821-433d-8f1f-7fcf371505a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.135, 0.905, 0.151] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_707", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_8b322fdc-9820-44d2-8476-1304ae1129e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.029, 0.384, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_708", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_f2b10367-aef5-46ac-805f-5d684a9c958d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.36, 0.662, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_709", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_aef52996-d58a-4772-9e64-05599aab864b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.057, 0.137, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_710", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_fef93795-3e62-497a-bee0-d9cee88d0932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK\n[link] TICKETS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.177, 0.755, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_711", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_b774739a-82c1-4b3c-a4e6-9925804f8038.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.232, 0.309, 0.251] in the image\nAnd my action is Action: TYPE\nValue: columbus"}]}, {"id": "mind2web_712", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_e5094c07-65e1-407b-9bd1-e5fbc050372b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK\n[link] Last 90 days -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.101, 0.999, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_713", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0ab23995-fd39-483f-9eb4-c633bef00a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.215, 0.688, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_714", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_6464b9d6-3f44-4954-b9eb-b304fab198b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_715", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_598ec622-0634-4fb2-8976-b12bb75f1b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.1, 0.424, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_716", "image": {"bytes": "", "path": "./images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_6dd05ca6-239a-4a8e-b976-b8399dd021fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music event organizers and follow the second one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.324, 0.273, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_717", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_f363d0a7-38a5-49f0-90be-ee433c2505b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.196, 0.093, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_718", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c11a83bd-a583-4cdc-a473-04c26ce5eba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Sun, Jun 4, 2023 -> CLICK\n[div] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.448, 0.959, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_719", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_0b35cdaf-9c0e-4533-a402-1801ac2683a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.778, 0.367, 0.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_720", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_84321d57-8d7f-4a25-b4f2-dff4851503a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.642, 0.318, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_721", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_437b48ad-7167-492d-ae11-280b37292671.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.177, 0.373, 0.207] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_722", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_08aae8dc-6df3-4f78-b0f0-2fbdef6c83f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.589, 0.312, 0.615] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_723", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_ad5633e3-d238-41a4-9b12-78597a1f2070.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.514, 0.956, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_724", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_73800d44-ed5a-4491-8f5c-117137ca2c28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK\n[li] Neighborhood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.442, 0.477, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_725", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_1a402d99-0a6f-43eb-b962-740175d36fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Certified fresh movies -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.535, 0.435, 0.559] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_726", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_87606ebb-a36a-4bdc-ada4-c0fee1eb610d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[textbox] Guest rooms -> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.655, 0.648, 0.696] in the image\nAnd my action is Action: TYPE\nValue: 20"}]}, {"id": "mind2web_727", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_d229bdd3-bc87-470b-910d-a43ff645f98f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.019, 0.652, 0.034] in the image\nAnd my action is Action: TYPE\nValue: washing machine"}]}, {"id": "mind2web_728", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a074576d-9370-4453-bac0-97e1eb002723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.571, 0.492, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_729", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_48a1f5d2-1da6-4ac5-a698-b4fbc319662d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.033, 0.348, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_730", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_e003a56c-dc49-40d5-bcdb-aa86ca0d7b66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... 
-> TYPE: New york knicks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.165, 0.289, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_731", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_94a52ef2-5c85-4dae-9de0-e54c23e77f0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[link] Get tickets -> CLICK\n[p] Number of Travellers -> CLICK\n[img] -> CLICK\n[button] Check availability -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.611, 0.496, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_732", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_d39be68c-f55a-4c10-b578-860068cfaa10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.188, 0.254, 0.211] in the image\nAnd my action is Action: TYPE\nValue: 10000002"}]}, {"id": "mind2web_733", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_6c0f5b45-5196-4eb1-ad83-ae44d2d157e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\n[link] Navigate to on-demand -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.231, 0.236, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_734", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_e250ff6f-8015-4511-9c8c-e97988aa1f87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.078, 0.763, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_735", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f2cd36f6-89c6-42a0-a70a-1ed8db7b1860.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.324, 0.234, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_736", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_399f334d-68f3-4b0a-ad34-57645e5d3ae6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.335, 0.521, 0.376] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_737", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_fcad2218-4124-4bbc-bee8-b921a0a01c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.131, 0.486, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_738", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_74bfa8a6-c7a4-4df1-935c-57ff41629dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.407, 0.055, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_739", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_af528d8f-2c1b-44c8-8440-ab3caf5b60ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.335, 0.038, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_740", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ddc4c6d0-c812-4ea9-ae6a-06d94c832d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.21, 0.13, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_741", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_a9b0f4a3-62b9-47d2-b251-ff3694d32864.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.041, 0.809, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_742", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_a3049f40-57a6-4b30-bb6f-49183455254d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. 
-> CLICK\n[button] Saturday, April 29, 2023 -> CLICK\n[button] Update -> CLICK\n[link] Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.26, 0.168, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_743", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_6a9be6b6-2a46-47a5-baff-fe468a69a2da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.232, 0.266, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_744", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b9148fc0-f5ac-4ff4-a188-9705698633f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.335, 0.242, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_745", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_cddfab15-2683-475d-bf18-73b8f9e9d08b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.044, 0.256, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_746", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_c7df32a5-dab9-4edb-afc5-75f2a9996884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.177, 0.359, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_747", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_89fcbaca-06b8-4af6-82fd-530433e7f2c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.222, 0.789, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_748", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_1595ac95-da5c-474a-bba7-243c1f2fe245.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.318, 0.716, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_749", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_66a559b3-5317-49ef-b0ba-ca14967bfde9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.963, 0.339, 0.974] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_750", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_d1b37c24-1fd4-4076-981d-ce6ffecdaad5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian\n[b] Vegetarian -> CLICK\n[button] Fri., Apr. 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.326, 0.253, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_751", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_bfd1ac18-f07a-4bc9-ba4e-cdc4eb36fafb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.115, 0.702, 0.143] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_752", "image": {"bytes": "", "path": "./images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_fb07bb60-507e-4d13-8d03-5a9acbe22238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Edit my movie watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.233, 0.094, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_753", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_3e1a0425-96a2-4d33-bb75-a68e69a3a034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.055, 0.788, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_754", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_84dd4b7b-ea40-4309-914e-f2eea4e5e68f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.226, 0.772, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_755", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_4b5fe889-0eb4-48b7-b3a0-4be0ddcf6d3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.359, 0.787, 0.381] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_756", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7c771dc6-d31e-4b5e-9619-90f0f383d7fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.424, 0.699, 0.444] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_757", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_76ee2054-93df-48ad-8b0f-8af2935d3b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\n[link] Shop -> CLICK\n[link] Shop Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.14, 0.138, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_758", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_e506b344-947b-434c-a139-e271b049ba34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.17, 0.285, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_759", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a97d602c-fb9a-4e8f-b69b-e92f1033bf8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.161, 0.594, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_760", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b48ce68b-6792-48b3-8531-e49eef1bf081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.254, 0.246, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_761", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_44ace67e-82b4-4aa8-9f39-85b7fb1c3059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.163, 0.5, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_762", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_40602eff-59ce-454a-98ca-c13c6f89eff6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.284, 0.275, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_763", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_2b375810-fcfa-4607-b97b-f1d4ee31a5a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.328, 0.65, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_764", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_e49d2de9-5610-407b-8f08-cb457d9b6297.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456789\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.22, 0.94, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_765", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_826d708c-25dd-46c8-9e40-0a777f75a221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.464, 0.95, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_766", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_f88b3369-3cf8-4294-8704-fd8c5c30361c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK\n[group] \uf067 Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.321, 0.205, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_767", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_e749f011-925d-4541-bd98-9a4e3a6d80d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.301, 0.991, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_768", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_502cb52a-b1bc-4917-b8a3-05b5c0d471eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK\n[link] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.914, 0.321, 0.953, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_769", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_5ae05692-58b4-478f-91f9-c62ad636c125.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... 
-> TYPE: Lubbock, Texas\n[button] Search -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.215, 0.871, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_770", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_fe5b6d26-4f32-4da6-b7c2-59a12433d959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.246, 0.577, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_771", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b12af52c-85cb-41ff-81ff-f93f40ac4751.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.131, 0.084, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_772", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_841ce39d-d503-4b9f-a08c-4f24ac450c47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.024] in the image\nAnd my action is Action: TYPE\nValue: toilet paper"}]}, {"id": "mind2web_773", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_60374600-f447-4297-b386-44c4c154ff42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.578, 0.916, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_774", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_faf7bc28-9f05-4e7d-ba4d-8ada377c3d0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[img] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.166, 0.316, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_775", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_adba973c-6ed5-4579-99ba-918691da9c24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.401, 0.67, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_776", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_cc95f693-0bf1-441b-91b7-7129f8b0361a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.0, 0.805, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_777", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_24d83bed-8e0b-43ec-8a4e-6a977a86d9fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.186, 0.223, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_778", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_13a6b6cb-b169-43ba-a0e0-64b556025f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.117, 0.233, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_779", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_f9c4ef1a-dfda-462f-a275-179397cc7580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.02, 0.335, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_780", "image": {"bytes": "", "path": "./images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_fcdb8150-acc2-41e6-9b61-f2ac96016afe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information on how to find lost AirPods.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.0, 0.799, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_781", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_ae175601-aa78-4ea8-91ea-1f7aa0a5e4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.656, 0.474, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_782", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_b45f21d0-bde6-46c9-a618-7710e5efa2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_783", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_9a969207-0b5f-4c8d-a8a5-6474fcfd24ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.582, 0.728, 0.601] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_784", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f1904386-8aaf-4d31-85bd-37a9301574a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.294, 0.359, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_785", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_b7a26037-9c60-43df-a713-2ae35e0bffd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.205, 0.923, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_786", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_947fb47d-5ba4-4225-b3ca-4d4948db8acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.049, 0.646, 0.066] in the image\nAnd my action is Action: TYPE\nValue: trade in"}]}, {"id": "mind2web_787", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_d6957c49-bb8b-4449-9fd0-2d154802e084.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.232, 0.727, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_788", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_08fe33bf-abd6-4099-b093-38ac58b3051b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? 
-> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.122, 0.266, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_789", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_ee2013b2-35d7-4611-a7b3-1a2bcad752bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.507, 0.84, 0.553] in the image\nAnd my action is Action: TYPE\nValue: MONTGOM"}]}, {"id": "mind2web_790", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_454819fe-c9be-4427-99b7-f70b68c0c6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.243, 0.21, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_791", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_abeec648-d689-44f8-a277-15fda2ecf8fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK\n[button] 10:00 -> CLICK\n[button] 10:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.119, 0.953, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_792", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_cee6030c-0d5e-4c19-89a8-df52fcc3406d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.197, 0.32, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_793", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_8ca400b4-34ee-4d74-b6be-b8074b17cadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.233, 0.322, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_794", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_ba6b661d-92fd-4244-8a69-962bc891113c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.432, 0.286, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_795", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_c03afb1c-7dc5-4f54-bad0-e9361412ba27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: TYPE\nValue: Harry Reid Intl Airport, LAS"}]}, {"id": "mind2web_796", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_522acf7b-1e3a-4d27-a685-9133c8d1a5c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.316, 0.584, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_797", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_39c5f4f4-45aa-4c73-ac79-3c9e99a750a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK\n[link] Find Schedules -> CLICK\n[div] Earlier -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.245, 0.857, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_798", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_cf712776-65b6-42ad-851a-6f37fcb94caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.373, 0.379, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_799", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_121e3dc7-8829-4755-b0ea-bc71253e4038.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK\n[listbox] Select railcard 01 -> SELECT: Veterans Railcard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.33, 0.391, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_800", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_e7149b0a-8004-47b5-a369-901f174947a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[textbox] Enter zip code to save preferred delivery location... 
-> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.122, 0.454, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_801", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_1dd4cd76-6894-4702-8d5e-d107e0846f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.32, 0.62, 0.337] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_802", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_e5e1ae2d-f013-428c-a7ff-d3144deb008c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.456, 0.316, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_803", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_e7266152-d209-4b04-b2dc-0c07b35a3d1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.0, 0.354, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_804", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_46aff272-d165-4cda-bb3a-39cf087aaba3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK\n[link] GLAMPING \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.276, 0.324, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_805", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_4d4276de-b1f8-4b63-95a9-90730f481623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.164, 0.586, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Stanford, CA"}]}, {"id": "mind2web_806", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_332e2265-61e7-4ed9-b753-4fe9255dc1e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.44, 0.212, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_807", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_a5d96eea-9933-4c28-aba9-b0b7b95ea8a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.047, 0.128, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_808", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_7c7f0a13-b479-4828-9bd3-dbbb8bafdace.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.273, 0.479, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_809", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_dd3a385f-430e-44de-adb5-e2318ec80c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.122, 0.395, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_810", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_8735b62b-f80c-4908-8d6f-bb314454a8b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 0.22, 0.632, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_811", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_ff659553-9a17-46b7-8ba2-3b166e41eb8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK\n[link] Choose Another Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.268, 0.968, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_812", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3072f153-fda1-40dc-a266-ae38ada19df4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK\n[div] Sat, Mar 25 -> CLICK\n[checkbox] 25 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.088, 0.923, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_813", "image": {"bytes": "", "path": "./images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_9761249d-77f4-4009-a2f7-051f0a77a81a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for upcoming MLB games taking place on Sunday, 3/19.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.1, 0.384, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_814", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_3c77beb9-4242-4ebe-8ec6-d5599cf39cd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK\n[button] Add to basket -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.327, 0.517, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_815", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_b75cd0fb-f7e4-4a75-a5aa-58ce641b02be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.204, 0.369, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_816", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_944f9f58-f6e0-4143-a86a-a3ee31f8e955.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.279, 0.048, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_817", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_abd904e6-4b53-4414-95db-52a328c92bb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.091, 0.523, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_818", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c72c6551-816f-4f68-a498-19516269cc9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... -> CLICK\n[combobox] Departing from -> TYPE: ATLANTA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.163, 0.96, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_819", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_2431a829-5471-430a-b02a-c30e63a9c5c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.247, 0.422, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_820", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_8516581e-6b9e-469b-8862-803974da5ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.142, 0.047, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_821", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_cc647dd5-2319-4fd2-a79e-b64dc529275b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.097, 0.646, 0.131] in the image\nAnd my action is Action: TYPE\nValue: nba2k23"}]}, {"id": "mind2web_822", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_7ef7d650-69ff-4cbf-a538-30a540a0be22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Avatar The Way of Water"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.192, 0.594, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_823", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7c000882-e591-4c81-85ea-ed2ff428e75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK\n[spinbutton] Enter Minimum Price -> TYPE: 0\n[spinbutton] Enter Maximum Price -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.816, 0.169, 0.84] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_824", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d44b31d5-4480-44c8-b773-49adca6d7e9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK\n[div] -> CLICK\n[checkbox] 4 stars rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.074, 0.237, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_825", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_2f28c65c-d95f-4e34-b76b-db3a412b8fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: los angeles kings\n[option] Los Angeles Kings -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.518, 0.881, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_826", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_861c9513-4fed-487b-8eea-fd35543e0599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK\n[checkbox] RV Site -> CLICK\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: California"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.367, 0.316, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_827", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_2f8df0c4-d081-4b95-a0d3-3d80f872ac6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.537, 0.197, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_828", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_4713b2b4-b558-441a-8635-75ad2fa8a3a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.26, 0.929, 0.277] in the image\nAnd my action is Action: TYPE\nValue: April May"}]}, {"id": "mind2web_829", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_fce397d8-aece-4a06-8c4f-1a90b6b1a8bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK\n[link] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.259, 0.133, 0.282] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_830", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_65d6ae1d-4269-4ca6-9e02-0aabb1af9aca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.016, 0.39, 0.051] in the image\nAnd my action is Action: TYPE\nValue: wireless keyboard"}]}, {"id": "mind2web_831", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_dd98ebcc-12fe-476a-aa79-7c94bde9eabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_832", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_95da4d8d-e01b-4bdc-9f1c-01ab6235c3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.479, 0.19, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_833", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_99a22a2c-d4bc-42e8-922b-7e29faba46d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.416, 0.83, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_834", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_963377e9-ccf5-42ae-90ad-74516b1a38a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.415, 0.966, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_835", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_efd43e42-a268-4260-9e20-0333d7e55f50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.029, 0.054, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_836", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_954678cc-e3c6-4ded-85b6-8032ea329f5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.275, 0.908, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_837", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_d91dd847-e852-4522-8725-3ddd418c8f7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.091, 0.969, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_838", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_0c0f9528-0f31-46b0-b7c4-78507a1facc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.091, 0.376, 0.112] in the image\nAnd my action is Action: TYPE\nValue: Mumbai"}]}, {"id": "mind2web_839", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_6c672ebf-8e99-41ab-843c-8fac574b2092.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... 
-> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.25, 0.956, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_840", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_e644a94d-62bd-43a0-8279-1ea74b67e337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.153, 0.196, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_841", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_870c171f-328a-46b1-8d81-e111a3d7a5f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.219, 0.853, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_842", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_aa4ff294-fa00-493f-8625-e483115057f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK\n[button] Seattle -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.119, 0.576, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_843", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_b3dd079c-2531-400a-92c1-7555485e132e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.2, 0.272, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_844", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_383cc11d-8136-408f-bb05-a3222ccfdfc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.105, 0.374, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_845", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_1959a470-8e5c-4c0f-826c-a690ded653ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.163, 0.554, 0.182] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_846", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_fe9e507b-6b68-4939-b7eb-2cd4132794b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.306, 0.664, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_847", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_04efa8bd-69ee-4f5a-97a9-0a70d2e5de36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.728, 0.242, 0.845, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_848", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_009a6173-dd0d-4afc-89c3-25931c746449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.28, 0.802, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_849", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_662fb87c-70e4-4f70-bf85-337aa79a8d75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.457, 0.945, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_850", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_63b445da-7103-4ceb-b2b2-afc1395d10c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.108, 0.661, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_851", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_0a38b7ed-8182-4324-bbae-469672aa4c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.144, 0.28, 0.156, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_852", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_178bcb43-7ba1-429d-a973-088ef383426e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.313, 0.658, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_853", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_7591b059-f011-44df-9bc3-cf3399a62179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.09, 0.266, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_854", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_3371a7e6-bf53-469a-941f-47a7f6038b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.389, 0.238, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_855", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c8aaa308-dddf-4bc7-9835-e30a48203407.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock\n[button] Search -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.251, 0.47, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_856", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3092161-3b70-4ae0-833a-2ec69d613c38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.005, 0.867, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_857", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_f7935528-8d53-49ae-9235-70b6c1304d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.19, 0.5, 0.209] in the image\nAnd my action is Action: TYPE\nValue: XA1234"}]}, {"id": "mind2web_858", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_53d2d9a2-528f-4a9a-b182-b80a0795a6a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.213, 0.218, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_859", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_00403920-e25f-4c7e-877c-e8e119cda4e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. -> CLICK\n[combobox] Guests -> SELECT: 3 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.721, 0.235, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_860", "image": {"bytes": "", "path": "./images/d516b2f7-b180-422f-9e24-6fb778cb4b55_51e09831-d247-402b-9853-bfaeb5d4399b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me Ed Sheeran Chords & Tabs\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: Ed Sheeran"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.081, 0.897, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_861", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_f3eb8779-cf1a-4688-a021-2a5257bba89d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.17, 0.226, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_862", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_4b20b395-938e-4a1a-8f71-bdf4dfb419e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.265, 0.837, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_863", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_21b2e6cd-1e81-4e05-b5f0-37feba1bafe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.009, 0.891, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_864", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_4abeefc8-cbb3-4f6b-9059-6c379d4e2e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.826, 0.143, 0.931, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_865", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_07461677-526b-43c3-96a0-f92b0e69a3b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK\n[combobox] Search for a line -> TYPE: Oyster Bay\n[span] Oyster -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.272, 0.344, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_866", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_c01ffea2-9354-448e-8ff9-2f3083925381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.752, 0.158, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_867", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_56fe6cc9-a3e9-4b7c-990c-803cd8a94f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.369, 0.95, 0.403] in the image\nAnd my action is Action: SELECT\nValue: MasterCard"}]}, {"id": "mind2web_868", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_2a48ee3e-f492-498c-bdad-de1107d2da4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.385, 0.823, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_869", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_d8569323-5315-47fb-9b08-d9b48ce92b2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.346, 0.365, 0.364] in the image\nAnd my action is Action: TYPE\nValue: 12345678"}]}, {"id": "mind2web_870", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_30f0a458-bb0c-49e4-b940-6b82a6d7b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.225, 0.161, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_871", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7005bf98-3214-4bfb-8133-79cfab48306e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.206, 0.374, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_872", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_28f01eb6-f60d-4efe-82b9-6bab5fb6765c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Hiking Footwear -> CLICK\n[link] add filter: Waterproof(456) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.773, 0.208, 0.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_873", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_c67ea018-cdc3-44b1-aec9-1dd781811f05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.384, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_874", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_5fa29f1e-7753-4629-b276-e8466bef50a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: electrician\n[span] Electrician -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.214, 0.257, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_875", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_cf70ebee-773d-4ad7-b6b1-a2d55fdca152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.128, 0.39, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_876", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_f566954c-0762-4dcf-a758-b847ef11f301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.28, 0.362, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_877", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_9fd0d975-1788-4b24-ae73-d661fc03b8ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.192, 0.471, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_878", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_1a61760b-4d3e-459f-9940-00033fd2555e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.031, 0.164, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_879", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_158410d4-4bff-4a9d-bd03-39997c0c9a89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] 6 - 9 Days -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.083, 0.416, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_880", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_5335dfb4-618b-4282-951a-e9066ef63841.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: buenos aires\n[button] Buenos Aires, AR (EZE - Ministro Pistarini) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.366, 0.246, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_881", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_25e25bd8-d8c8-4a09-a158-eccbd9e38296.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.316, 0.574, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_882", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_603d8a45-cc7a-4dc0-a899-c74f8c86e870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.118, 0.353, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_883", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_2cdca21b-352f-4f82-84fa-16b60dde7c28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.194, 0.56, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_884", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_08a0952a-311d-46f3-bcfd-f183dc5cf434.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK\n[link] Dining Sets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.83, 0.661, 0.933, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_885", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_64eb802f-8390-4ffb-8a72-1f10211fffbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: red sox vs yankees\n[button] Search -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.771, 0.256, 0.799, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_886", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_1a2a11b4-132e-4f75-a8fb-ab3ede13cd0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.309, 0.394, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_887", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_eaf260c8-e239-4e96-b387-970c8a48e56e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.036, 0.356, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_888", "image": {"bytes": "", "path": "./images/c4538b84-ca81-413a-8f3d-7ce87e42b4f6_4dd1dfca-450b-40d4-a55b-c20df696ad63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Saturday park hours for Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.583, 0.846, 0.614] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_889", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9d6b2e4b-880a-40d0-ac9d-3f9fdcb3e7eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... -> CLICK\n[combobox] Enter a location -> TYPE: north station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.296, 0.745, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_890", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_1945d47d-5810-42ee-bc36-5f0a90c7c1f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.046, 0.553, 0.083] in the image\nAnd my action is Action: TYPE\nValue: skirt"}]}, {"id": "mind2web_891", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_44b87654-0fc1-443f-88c2-f8898601f2bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.22, 0.956, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_892", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_ae470708-5a6a-454a-a56e-1bacceda7eea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.545, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_893", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_be0a740c-1b47-4b45-b879-8a45d32f7c0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.129, 0.552, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_894", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_f25623fb-9589-4fb2-984c-2da6871e9a33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.416, 0.316, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_895", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_383ffaec-42ae-4770-9ef6-305c581ca89e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.209, 0.133, 0.227] in the image\nAnd my action is Action: SELECT\nValue: 14"}]}, {"id": "mind2web_896", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_1f6d99a6-6430-48ff-a6a9-8c7881d4a609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.122, 0.177, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_897", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_83416428-3787-4952-875d-dcfbb6e4cdc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK\n[div] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.245, 0.526, 0.343, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_898", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_ebbb7fed-de37-4ce8-ad48-6bf6573f5708.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.889, 0.531, 0.948, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_899", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_5c1467a5-8401-44d3-a4d2-2beb7cfeb39e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.436, 0.634, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_900", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_e28713b9-2334-46a3-9c37-b9d5e33e2cf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 0.203, 0.284, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_901", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_28288ba0-786e-412f-a038-4a9df7f9a4a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.55, 0.249, 0.596] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_902", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_17c7b831-aee8-4f2b-88f2-a4ebe641fe61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.364, 0.478, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_903", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_5bba0b11-0c16-4d8a-89e8-086cdeb2a3b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Thu Aug 10 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.275, 0.957, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_904", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_06fa60ae-2f59-4035-88f1-acb8471e415b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.006, 0.561, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Nirvana"}]}, {"id": "mind2web_905", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_8d96a315-2e4d-4faa-ab1b-d3f7ddec978b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.035, 0.775, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_906", "image": {"bytes": "", "path": "./images/18a104dc-29e7-4777-9fee-1e023be1d686_6d88fab4-239f-42ed-8ccf-c52a478eb08d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ground operations jobs with JetBlue.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.564, 0.183, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_907", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_908889f1-3f7b-4123-b773-f233a4fde2dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.158, 0.777, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_908", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_5074958c-7330-4688-bdd6-f3eb05b8c31e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.216, 0.557, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_909", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_e41e2eb8-6d19-446c-a636-c3ad48011f2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.186, 0.134, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_910", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0e8893ad-49da-4f23-b04b-ee6ed6e2caf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.485, 0.492, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_911", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_d298cf02-a542-415b-a3ec-a168b352b112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[option] Airport Chhatrapati Shivaji Maharaj International ... 
-> CLICK\n[combobox] Flying to -> TYPE: NEW DELHI\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.587, 0.376, 0.591, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_912", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_cb8dab61-9fd6-4508-bd9a-881a8e130872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.088, 0.559, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_913", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_2a47b218-77f1-4189-abf5-aa8933b7584f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.215, 0.689, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_914", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_cb30eaad-9ff0-4869-bdf9-d2357ab500bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.228, 0.182, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_915", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_dcdc1299-4778-4bab-934b-25b7f85f4e65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.064, 0.902, 0.092] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_916", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_efda118b-1324-4375-90be-92d6e1767945.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.359, 0.592, 0.395] in the image\nAnd my action is Action: TYPE\nValue: Disneyland"}]}, {"id": "mind2web_917", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_87dc3c95-c884-4f08-9267-36cde803766b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.422, 0.359, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_918", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_41532c32-73da-4df8-83e0-f50f2f3c9baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[button] Choose your room -> CLICK\n[button] Book Business Double Room A -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[button] Choose -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.626, 0.962, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_919", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8922d5d7-b361-488e-bc90-959777b2d346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.243, 0.159, 0.263] in the image\nAnd my action is Action: SELECT\nValue: 3 Tickets"}]}, {"id": "mind2web_920", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_c58aefeb-4d48-4653-a97d-b5958b12472a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.255, 0.488, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_921", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_0833eb64-245d-427a-be49-e6a766226478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.055, 0.181, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_922", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_9376f06f-d441-4ad0-8f26-502331ad9fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.258, 0.053, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_923", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_ee4acd97-3547-4a8e-ba52-b49838ed1d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Tsiakkos & Charcoal"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.062, 0.691, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_924", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe33bdc-459b-4a31-96a3-c8439d26ed77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.208, 0.308, 0.245, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_925", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_e38767aa-e1c6-4969-b1f0-eb94870fafd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.381, 0.123, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_926", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_6d0e6630-2780-436f-8ab3-47c831fe077c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.052, 0.153, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_927", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_d526c9e6-eb77-49e7-ac83-5cf979528a1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK\n[textbox] e.g.: New York -> TYPE: Membership tier"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.072, 0.877, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_928", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_8252ffbc-069b-40e9-b567-119df02fc127.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.4, 0.409, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_929", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_ffa65472-f26a-475f-b7f3-b038d6bf632f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.151, 0.474, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_930", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_84185798-3837-4eae-8599-fcf123c64957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. -> TYPE: washington\n[a] WAS - Washington, DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.363, 0.875, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_931", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_35c777e6-4dd6-4380-8684-9ebf15d75980.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.28, 0.35, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_932", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_364bc655-fc39-4523-b249-45dca735161e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.258, 0.291, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_933", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_0bc6e4ed-a88d-49cc-aa74-a52453e53118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.4, 0.266, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_934", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_2557846c-fbb2-4d8a-a709-15856d7dd485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.264, 0.829, 0.272] in the image\nAnd my action is Action: TYPE\nValue: bali"}]}, {"id": "mind2web_935", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f3d7a9b2-8a52-4123-b7e7-17c771db0e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK\n[checkbox] ORANGE -> HOVER\n[checkbox] L -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.456, 0.906, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_936", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_acdf9fe4-f31e-4899-955f-a59164fe2044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.256, 0.574, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_937", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_36474a36-dedb-4836-b0bb-64cb383cadf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.562, 0.287, 0.588] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_938", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_f489ea2d-fd52-48bc-a2dc-b225a500c1c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK\n[span] Virginia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.463, 0.331, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_939", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_ec31e954-a50a-420c-8399-467d237b647d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.009, 0.935, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_940", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_a346f608-7469-48db-ac2e-ecd8eef73e57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.107, 0.902, 0.153] in the image\nAnd my action is Action: TYPE\nValue: Europe"}]}, {"id": "mind2web_941", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_4c8a7cdc-5981-4172-8c9f-9bdb8344d39f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\n[textbox] Where to? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.271, 0.359, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_942", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_cc825221-7e27-4074-a31a-56d90e876fe6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.618, 0.404, 0.626] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_943", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_04780493-f795-4e65-a207-2e3edb57e3e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK\n[div] 27 -> CLICK\n[div] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.935, 0.171, 0.977, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_944", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_771625de-5227-4b48-a469-ac3b6593b8ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[b] Paris -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.11, 0.743, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_945", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_67ca7cd9-6200-4e87-a045-d7b4cc5e6c72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.58, 0.931, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_946", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_bc9d4b7a-56e0-4646-8339-e74a346a70b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.393, 0.421, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_947", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_ea3f8f9b-8bcc-4435-8a9c-15a3bc749bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.295, 0.285, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_948", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_2569ea8a-41ee-43ea-a7b7-6804a67043df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.234, 0.537, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_949", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4c3fa7f5-c7d3-4380-b3c5-3e0c6a22dca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.257, 0.494, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_950", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_e86c1b4b-7bca-43d5-905b-9cb5119d4fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.07, 0.791, 0.093] in the image\nAnd my action is Action: TYPE\nValue: Membership tier"}]}, {"id": "mind2web_951", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_7a968f10-ab98-49c7-8dce-a5ee4b28a838.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... 
-> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK\n[button] 10:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.597, 0.715, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_952", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_d4598675-b3f5-4401-989e-45aeca7b33c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK\n[button] Go! -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.326, 0.162, 0.376, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_953", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_f786baf2-b7ce-4eb3-ac8a-bb407d850be6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... -> CLICK\n[span] 31 -> CLICK\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.315, 0.223, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_954", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_e3c95f6e-c1f6-4930-90d4-a34358b98d49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.115, 0.815, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_955", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_33f33559-f12f-4c42-9a01-9e4ce1feb006.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill\n[button] Search -> CLICK\n[link] People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.143, 0.426, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_956", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_bee495fb-a632-4df6-b714-a7b289a9c7bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.659, 0.338, 0.667] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_957", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_f5a169c9-f5f1-465d-ad88-c56bf75aa1ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK\n[span] 8 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.201, 0.686, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_958", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_8db7043e-11fc-4825-a35d-c65b0acbcbcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK\n[heading] Electric Mile Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.369, 0.277, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_959", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_2cc7fafe-b2f4-46ce-9f99-62c9b885e2db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.801, 0.148, 0.886, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_960", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_a8076012-4c69-49af-af46-8d84cfd2638f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.304, 0.363, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_961", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_344fdc6e-858a-48cf-8dc8-073c98975aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.107, 0.239, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_962", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_e909e5cc-b764-4ebc-9e91-b87b5863879d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.221, 0.312, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_963", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_308e735f-a080-469b-879b-4c99508ede29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.43, 0.685, 0.457] in the image\nAnd my action is Action: TYPE\nValue: 133 st avenue"}]}, {"id": "mind2web_964", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_05016fe3-32db-4f00-8d4a-e23b842cbd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.457, 0.375, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_965", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_9a991a36-6a7b-42c7-9599-fbfebc37336c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.648, 0.268, 0.675] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_966", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_7638b874-5601-4028-9e02-931e87de0aa4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.018, 0.592, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_967", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_22ff8d83-db1a-44b1-8a74-fd9c1bd0b489.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.035, 0.535, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_968", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_21c2de06-c37f-48d9-9657-a25121393718.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.747, 0.405, 0.758] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_969", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_7681b7eb-faa0-4363-ade5-49c5cd230b87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.098, 0.554, 0.121] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_970", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_19094986-c5bc-4351-96e5-2b11185894b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.508, 0.092, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_971", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_8207c27d-1536-43da-8fdb-6973924ef101.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\n[button] Car Sales -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.183, 0.215, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_972", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_8cfcf8da-5ec8-4836-87c2-01cfc886d515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.265, 0.469, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_973", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_d04d30d3-199f-40a7-a804-6bf215c11519.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.27, 0.486, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_974", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_e929f088-cc08-476a-b91a-607c0572186e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.336, 0.176, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_975", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_8dce160e-b79a-4de0-88f2-cc59dc891541.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.11, 0.695, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_976", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_0e63a72b-be6d-4c64-bd1b-b745222e02ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.126, 0.652, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_977", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_580c3525-e4a3-44e7-8480-ff930ac9b5ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.012, 0.392, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_978", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_d88b5180-5170-4103-9593-db363c4caede.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.286, 0.729, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_979", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_2db82559-908a-4e1e-a73b-9cba33575c47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.452, 0.652, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_980", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_b977193d-503a-4389-ad04-7ace55c70e04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: buenos aires"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.206, 0.617, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_981", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_0acd72fb-4000-48a3-be6e-9df18f318ca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.179, 0.797, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_982", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_8fd5373b-93ce-4726-8b01-9cc2688e631e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.164, 0.42, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_983", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_2b0f415b-e7e6-4380-b66f-97cafe7a2a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\n[link] Shopping Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.262, 0.523, 0.276] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_984", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_cd10aed6-e4b2-4cfb-af74-8c6b0c2caae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[div] Size -> CLICK\n[link] 9K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.164, 0.495, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_985", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_cbca43c1-359f-42bd-bcab-d1f61c9af591.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson\n[input] -> TYPE: james.john@gmail.com\n[combobox] Organization Type -> SELECT: Family Trip"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.165, 0.777, 0.187] in the image\nAnd my action is Action: TYPE\nValue: Johnson"}]}, {"id": "mind2web_986", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_9720e24e-7a9f-42e7-86e1-b48d0b05c57e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.482, 0.266, 0.518] in the image\nAnd my action is Action: SELECT\nValue: Honda"}]}, {"id": "mind2web_987", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_5c4b121e-b7a1-4a58-9f37-8146db77190e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.122, 0.144, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_988", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_949553fb-552c-49ed-9735-48391fd1e11a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.045, 0.378, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_989", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_9eb4e38b-69dd-454a-baa1-7412bf4d5830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.286, 0.514, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_990", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_8793262f-20c6-4600-a161-8ef3699192cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.013, 0.461, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_991", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_48fba121-c893-45fd-85f5-9bcd5094a0cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.422, 0.28, 0.467] in the image\nAnd my action is Action: SELECT\nValue: UNITED STATES"}]}, {"id": "mind2web_992", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_0f9dec76-0399-40ec-95ca-b76d4f091120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.175, 0.122, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_993", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_2642d695-660f-433e-b9fa-6f820ccfc7e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.451, 0.632, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_994", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_9c7a9f0c-e8d7-47cf-903d-30fb9d0b5854.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.11, 0.35, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_995", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1fff6452-6d62-49a3-84f2-dfbdf0f3e314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.255, 0.468, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_996", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_57d825a3-5ada-4f45-9789-a4d4cdecb04d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.258, 0.278, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_997", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_b597c9fb-a0dd-48ae-aab4-cb1928e97ecb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.232, 0.471, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_998", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_15c67851-8081-4bf2-a0d5-a005fb7a2e98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches\n[button] Go -> CLICK\n[RootWebArea] Amazon.com : dog bed 30 inches -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.143, 0.394, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_999", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_f0cd22a0-1abe-4cb5-b3cf-ae886963828c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.257, 0.211, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1000", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_aa25f120-fb19-4ecd-9708-d18d857e48ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.233, 0.122, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1001", "image": {"bytes": "", "path": "./images/b9f5dd60-690d-4f32-9e69-3db9d346f020_64d255e5-d45d-43fc-a9d2-49c01b414b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out what payment methods are available for monthly parking.\nPrevious actions:\n[link] SUPPORT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.58, 0.331, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1002", "image": {"bytes": "", "path": "./images/b9f5dd60-690d-4f32-9e69-3db9d346f020_a066138a-8316-4514-b493-b12221f9f0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out what payment methods are available for monthly parking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.008, 0.431, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1003", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9cffd287-ffc6-42a3-a408-b3198b37fd01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.101, 0.953, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1004", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_c3436179-32b0-4eee-87c9-92f564819bb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.303, 0.037, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1005", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e0384709-ae58-4537-9314-fe8e3eff55ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.355, 0.441, 0.388] in the image\nAnd my action is Action: TYPE\nValue: 250"}]}, {"id": "mind2web_1006", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_a98a57df-9cbd-4882-8daa-dd037f890ed7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK\n[button] Altavista, VA \ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.41, 0.269, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1007", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_da9e60a4-6770-483e-8d86-fdc06a48523d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.172, 0.387, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1008", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_3077a3f0-48f7-423f-a919-efe74e72572a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.364, 0.622, 0.392] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_1009", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_69ff5db0-8fee-4696-aea9-2b9142a8449b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.213, 0.393, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1010", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_97ed99f6-1db3-4a61-a2ff-356c3ebc03cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK\n[link] Popular tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.222, 0.457, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1011", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_4aee1c2f-31ad-464e-8dc6-bdddbf81f193.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.124, 0.719, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1012", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_9b72dec5-6c89-4886-9e53-c982e3601f5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: London\n[div] London Heathrow Airport (LHR) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.156, 0.923, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1013", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_3f99e4ac-4933-41ad-84e9-1395f8194c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.038, 0.366, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1014", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_b8cff931-25cf-43d1-bd7c-c81275bec27d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[button] Back -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.518, 0.193, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1015", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3b7020b8-f410-4928-836c-247d4cec350d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.137, 0.727, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1016", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_991edd8c-d233-4898-80b1-d91ad3831f9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.081, 0.342, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1017", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_146bcec4-09b5-47b1-97b0-6a17d09e9e95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.283, 0.975, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1018", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_c809eab5-3466-4dc3-89dc-7f1329bbf5d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[use] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.765, 0.716, 0.789] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1019", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_805622d3-f7bf-4871-8774-5a3fa531171b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.579, 0.663, 0.629] in the image\nAnd my action is Action: TYPE\nValue: brooklyn"}]}, {"id": "mind2web_1020", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_fe53f6bd-ace7-4544-aab4-803e19962624.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.312, 0.894, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1021", "image": {"bytes": "", "path": "./images/44284a24-b3de-44ef-bcfc-abf57c3f791a_8561b027-bf66-4fda-9414-c6a4d3f4213c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flight #039028\nPrevious actions:\n[input] -> TYPE: 039028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.027, 0.603, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1022", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_a3d22f91-736a-458a-b567-be47670899fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.27, 0.191, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 3 00 PM"}]}, {"id": "mind2web_1023", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_d434647d-400c-4bf5-8f59-e1bffe2583ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.072, 0.311, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1024", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_48d7ca2b-52f7-4730-9672-abe75d7aee92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.087, 0.129, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1025", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_170ece26-3a41-445a-b474-e2f643fcd5a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.039, 0.309, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1026", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_5dd12581-fb29-43fa-b5d8-785d5a442017.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Search -> CLICK\n[button] 2 adults \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.335, 0.331, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1027", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_54e0b5c5-4903-4dd7-97bb-8fbd0b78e00b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK\n[button] SEARCH BY JOB TITLE OR KEYWORD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.232, 0.571, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1028", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5bc86a39-0dbb-450a-b960-5d87fd390bfb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.67, 0.161, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1029", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_89f5d60a-f436-4da2-8d01-57ed7e61d270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK\n[combobox] How old are you? -> SELECT: 18-25"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.349, 0.626, 0.377] in the image\nAnd my action is Action: SELECT\nValue: Couple"}]}, {"id": "mind2web_1030", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_42ffdf8b-64e3-461f-8094-ca965c529368.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.328, 0.462, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1031", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_e44d4bc3-a47f-4b4f-95f7-635fea019b5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. -> CLICK\n[li] Neighborhood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.437, 0.171, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1032", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_fc9e7790-8bb2-4915-ab1b-3e51b7c79a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.27, 0.691, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1033", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_ef60845a-ab21-45d4-a89e-c77ec915c0dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK\n[span] Price -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.776, 0.806, 0.819] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1034", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_c5643e17-b79c-4cfd-a521-fa58b0c006ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[span] -> CLICK\n[checkbox] Private Lot -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.427, 0.688, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1035", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_0b1c7033-5c2a-4574-80c3-86956a2b8e45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.124, 0.964, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1036", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_aabba011-6b2a-42a7-abcd-05d5a92d6784.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.174, 0.574, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1037", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_336eb253-b594-42dc-97c0-a6a96a35c858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.436, 0.699, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1038", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_bb6aa598-dc1b-4818-ad11-2f54fde43845.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.374, 0.863, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1039", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a6849511-4d2a-4799-9fdb-82757a549170.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.142, 0.688, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1040", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_ad9b72be-b60a-47b0-af68-3123c7b4a0ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.147, 0.249, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1041", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ee2b0b59-efb3-464f-958e-90d6db5839f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.233, 0.712, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1042", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_8004364b-2cb8-4ef4-95bd-14d0365581d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: musical instruments\n[option] musical instruments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.08, 0.662, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1043", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_9b98022c-c2e1-4233-b9e0-547ad4c678c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.352, 0.821, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1044", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_0d6eba3e-0e6c-4ad6-ab14-4b84cbb2265a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] Free to home or store -> CLICK\n[button] Back to all categories -> CLICK\n[switch] COMPARE -> CLICK\n[path] -> CLICK\n[button] Add to Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.108, 0.899, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1045", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_d75f272c-4aa0-45e5-9737-33d00ac9f661.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.125, 0.515, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1046", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_075a04fe-0a97-42ee-aa93-736bd6b90023.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Air-conditioned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.136, 0.754, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1047", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_9a5ca032-1f47-4c2b-b33f-5356a5d7116b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.199, 0.252, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1048", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_bd93fcae-3bb0-4acf-8189-415e9cdce009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.268, 0.385, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1049", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_7d8f7af7-05f9-4a86-96e0-4a680f3b2c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.468, 0.884, 0.536, 0.893] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1050", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ed1f55a1-64f6-433a-b8da-0abfcdad6ec5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... 
-> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK\n[div] Relevance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.221, 0.98, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1051", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_1c141689-6500-45dd-b75c-d0e4ff1588db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. -> CLICK\n[link] Gallery View -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.145, 0.775, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1052", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_0f472545-bc7d-45e0-8614-8bda0386ae6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.324, 0.549, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1053", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_54b9a781-e649-40a7-8f18-0361898363c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Red Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.204, 0.45, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1054", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_ecfca8ce-c709-4d5d-8104-bb73107d2eb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.049, 0.79, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1055", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_5232d12e-5d7f-406f-a6ad-f9f054f9e2b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.317, 0.574, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1056", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9ebda146-dfb9-438c-b151-ae45e7624802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.135, 0.4, 0.149] in the image\nAnd my action is Action: SELECT\nValue: Highest Price"}]}, {"id": "mind2web_1057", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_7e6e1e43-af17-4934-848a-4c235520b30e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.054, 0.326, 0.087] in the image\nAnd my action is Action: TYPE\nValue: music"}]}, {"id": "mind2web_1058", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_2b86d495-4492-4196-9f85-257dbbf27d61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK\n[link] Number of Seats (High to Low) -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.119, 0.93, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1059", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_f4702531-91bf-45e3-9072-b758cdb35ae8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.225, 0.637, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1060", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_a87551ef-f7ee-40d6-8c93-deeea86e0d50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.327, 0.513, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1061", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_f421e549-c5ee-43bb-94ed-5df3f9ec7af6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.565, 0.015, 0.579, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1062", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_a7a0f732-8940-4da0-b0a4-6aa68777441f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.345, 0.916, 0.37] in the image\nAnd my action is Action: TYPE\nValue: Stuart Bloom"}]}, {"id": "mind2web_1063", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_ac106afc-a33a-4df9-9a39-62e856864f0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.26, 0.505, 0.296] in the image\nAnd my action is Action: TYPE\nValue: New Orleans"}]}, {"id": "mind2web_1064", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_518c52e1-9005-4b0e-b702-b4847a54b9e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.005, 0.51, 0.024] in the image\nAnd my action is Action: TYPE\nValue: king of tokyo"}]}, {"id": "mind2web_1065", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_99f3702a-7d37-4557-b110-b28e439599fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK\n[link] Tab -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.396, 0.97, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1066", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_48e7b865-b9ad-4137-875a-03918d8e7933.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. 
-> TYPE: manchester"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.106, 0.326, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1067", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_57fab646-1b28-4e2b-a267-e7b8b41ec858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.104, 0.374, 0.115] in the image\nAnd my action is Action: TYPE\nValue: TEL AVIV"}]}, {"id": "mind2web_1068", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_6157d472-2e23-4858-928e-091450f63ff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.162, 0.686, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1069", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_4fc1cd27-721c-4c5c-a8ea-a8dd4b50f1ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Rap / Hip Hop -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.382, 0.881, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1070", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_c1b6e8bd-86ad-45bf-91b1-1afa13bf0167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.256, 0.782, 0.304] in the image\nAnd my action is Action: TYPE\nValue: Ballet"}]}, {"id": "mind2web_1071", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_8e24ad34-2bd0-42ae-8e55-d78362055463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.376, 0.782, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1072", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_b00bea60-30f0-44e1-8bc5-b691cc38c391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1073", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9bca0cd8-adcb-40e4-b5be-788809e0f59c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.539, 0.253, 0.583] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1074", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_9bae8776-4d35-44df-9f57-73a91801eee4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.238, 0.28, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1075", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_31f7b8a4-1b3c-47d0-b248-9dac460f9f5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.412, 0.917, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1076", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_b8217911-9803-46de-a83e-f7996c8899e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.139, 0.128, 0.177, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1077", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_71116ee3-3e15-4a37-9498-820698eef9b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.204, 0.233, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1078", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_7e5977da-bceb-4022-a210-58f4c1a25d9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.117, 0.156, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1079", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_837177b9-fc1d-4b15-8035-b15efd915693.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.051, 1.051, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1080", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_11d98119-16db-4912-930e-afe4a8e285d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. 
-> CLICK\n[li] Cuisine -> CLICK\n[link] African -> CLICK\n[link] East Village (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.258, 0.207, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1081", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_12598eea-0be8-4d16-bf8f-2114636a2c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees\n[option] New York Yankees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.494, 0.941, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1082", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_822476fd-11a5-4d57-88d0-dbc0ead7e7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[button] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.373, 0.09, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1083", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_49696ae4-ee6d-4a31-9521-754f78814c3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.338, 0.253, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1084", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_1b09b330-9c1a-4b78-8a83-e9beab45cccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.242, 0.613, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1085", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_c3e9825f-6e7b-4c76-b9a2-2fd62f64a14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... 
-> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK\n[span] 160 pixels x 400 pixels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.73, 0.209, 0.744] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1086", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_a97fa8a7-cf17-4f25-a02e-adf1cc4c1e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.356, 0.069, 0.372] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_1087", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_36665f93-2ffe-41d3-9c44-bffa85122390.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.506, 0.864, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1088", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_bc47bc18-9778-4205-87e2-11cf7d6bad00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[link] Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.031, 0.301, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1089", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_209698d6-671f-446c-9af0-d3ec4a85381f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.198, 0.73, 0.225] in the image\nAnd my action is Action: TYPE\nValue: Times Square"}]}, {"id": "mind2web_1090", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_6788371c-dda7-4003-ba11-27f187e92ae1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.144, 0.927, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1091", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_413d5059-2958-4244-883e-b5ec9474badf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK\n[link] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.326, 0.316, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1092", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_fa1cf227-c27c-409e-b9a8-dd7b6211f1d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.213, 0.92, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1093", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2a0253c2-d580-4b2a-a8bb-32aa79df68f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250\n[textbox] Down Payment -> TYPE: 3000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.308, 0.459, 0.337] in the image\nAnd my action is Action: SELECT\nValue: Tennessee"}]}, {"id": "mind2web_1094", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_cf98d157-acd5-4580-b1e5-bcbfc964517d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.017, 0.564, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1095", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_75f78e83-b910-4a8c-bd15-97003e9216fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.244, 0.359, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1096", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7b485017-057c-4657-821f-25df616be249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.013, 0.284, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1097", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_d97dd54e-0198-46e3-b4d1-78883c9422c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.288, 0.63, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1098", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_946b79ed-c797-471c-a2cd-668b999cf3a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK\n[link] Book Now\ue90b -> CLICK\n[button] 12:30 PM Outdoor Counter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.418, 0.523, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1099", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_a7450e82-c348-402b-a662-a94d2c7f36d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.163, 0.782, 0.21] in the image\nAnd my action is Action: TYPE\nValue: los angeles kings"}]}, {"id": "mind2web_1100", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_48f143c9-5b96-4ecd-9782-f33375c7879d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... -> CLICK\n[div] Price Alert -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.217, 0.495, 0.231] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_1101", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8a0711a0-bd00-4c9e-8186-8178f224303d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Ann Arbor\n[span] Ann Arbor, MI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.323, 0.831, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1102", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_289a75cc-bb17-4cd3-8ef5-214427a0b471.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.048, 0.722, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1103", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_79cf4364-cc02-439e-a7c8-3244a668dd67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.284, 0.932, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1104", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_daa6154d-0580-423c-9d14-633a3de4fb59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.356, 0.582, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1105", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_77cdcb4f-8373-48d4-9dd8-6c9f2ca90b39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.129, 0.364, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1106", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7324432-b416-4718-99a0-42887f0bb612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James\n[textbox] Message: 200 characters remaining -> TYPE: Congrats on your new home."}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.57, 0.789, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1107", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_4c96f4fe-99ac-440e-804d-bea1c48f40ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... 
-> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.182, 0.359, 0.199] in the image\nAnd my action is Action: TYPE\nValue: little caribbean"}]}, {"id": "mind2web_1108", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_1cdcae52-8227-4239-8f0b-e512b769eef1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. -> CLICK\n[textbox] * Amount: -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.134, 0.956, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1109", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_9027e140-c963-4718-afa2-d6a47ce31453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.021, 0.645, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1110", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_fd4af41c-9faf-4c75-b376-c0be227d6673.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.244, 0.568, 0.264] in the image\nAnd my action is Action: TYPE\nValue: Dallas"}]}, {"id": "mind2web_1111", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_cf4a59fc-c8e5-42cd-9278-2a65679a02c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.334, 0.14, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1112", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_8810aa59-ef59-41eb-9dd2-4f79b1c8262f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.366, 0.168, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1113", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_94362238-1136-4e0f-a10b-dc03a18519f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.02, 0.243, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1114", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_99166e02-7f26-4ead-b3ac-370225b32d30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK\n[label] Kids -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.653, 0.412, 0.666] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1115", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2a371b9b-0d60-4252-bb8f-ed98d12d77c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01\n[combobox] Year -> SELECT: 2023\n[textbox] CVV -> TYPE: 123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.233, 0.95, 0.285] in the image\nAnd my action is Action: TYPE\nValue: joe bloggs"}]}, {"id": "mind2web_1116", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_321c544d-a6f2-48b8-9db0-60760dc13574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.181, 0.273, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1117", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_18f60177-0fe3-4abb-a4b8-22f8e0c6cbf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.721, 0.089, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1118", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_757c2d0e-783e-4e4b-b3d6-ae763877604f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.006, 0.348, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1119", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_7f57582c-c4a9-4de7-9804-977b8120e0dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.061, 0.74, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1120", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_d550a4d6-20bb-4663-8319-6ea7930ed041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.149, 0.196, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1121", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_52afba1f-0b83-422d-a20d-10afa650dc82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.275, 0.644, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1122", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5f4ecb4d-824e-44c3-870a-813c9d96d954.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.274, 0.715, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1123", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_3b878620-0274-48d4-930f-73ddb4e39492.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK\n[img] Submit Search -> CLICK\n[link] View KCMA Airport Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.112, 0.353, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1124", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8b668c59-b70f-47c6-89af-b30d15b3d84b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[textbox] Enter zip code or location. Please enter a valid l... -> TYPE: 90028\n[img] -> CLICK\n[generic] 6201 Hollywood Blvd., Suite 126 -> CLICK\n[button] In Stock at 6201 Hollywood Blvd., Suite 126. 0.2 m... -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.318, 0.812, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1125", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88e8254f-f9bc-4604-9dcd-92b6618a6ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK\n[combobox] Select Region Type -> CLICK\n[option] Domestic & North America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.208, 0.565, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1126", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_1d564585-6725-42ae-ab43-5203aab4ae39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. 
Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.424, 0.702, 0.472] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_1127", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_103182c3-4574-4ff1-bb5f-9dce65f2f2e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.048, 0.652, 0.071] in the image\nAnd my action is Action: TYPE\nValue: vintage clothing"}]}, {"id": "mind2web_1128", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_e3a1714a-ede8-4672-9707-2030e3484f5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.187, 0.139, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1129", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_2fedfeba-1361-4546-a638-0b8f70a9f69a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK\n[img] Men's UA Surge 3 Running Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.136, 0.924, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1130", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_90bf1701-92ac-4889-ae87-3983445c4c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.154, 0.271, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1131", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_1524351c-9647-484f-83b4-c844747fec77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.186, 0.294, 0.216] in the image\nAnd my action is Action: TYPE\nValue: 60538"}]}, {"id": "mind2web_1132", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_64a3ed10-4de5-4698-84dd-c9fe2059c059.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.093, 0.723, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1133", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_f77042ab-abf2-495d-9ad1-b4d23d272cde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Tsiakkos & Charcoal\n[heading] Tsiakkos & Charcoal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.893, 0.292, 0.977, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1134", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_5d543f92-b9a3-4ffd-8b08-c10032b9c704.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.189, 0.931, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1135", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_415f38ea-8042-4f3e-a62e-b7cf6a488379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] SEARCH CRUISES -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.269, 0.173, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1136", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_249650fb-199e-4ea7-b79f-6dfe0e204f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.233, 0.5, 0.262] in the image\nAnd my action is Action: TYPE\nValue: 1000000000000000"}]}, {"id": "mind2web_1137", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_daa6fad6-24ba-49d3-a6ad-2370649a2e8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.566, 0.147, 0.579] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_1138", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9d1d6dc3-2184-4ded-84ea-77035eeb1a7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.628, 0.795, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1139", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_303b1894-bec5-49b2-a4e0-b0c0cdc3d3d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.166, 0.898, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1140", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_e9493953-b795-4941-acb1-554769dbee75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.197, 0.358, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1141", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_a4a2bb03-dc07-4e60-942b-d43fe00ca4b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.562, 0.618, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1142", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_be6d2fbf-2e18-4a79-ae8d-29fe60b67390.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.386, 0.106, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1143", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_bc48fabd-306e-466a-98cd-490fe1730ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6\n[combobox] Select Year -> SELECT: 2020\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.245, 0.301, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1144", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_8f47dd64-2175-4e78-ba73-3395e8188152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.277, 0.3, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1145", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_a48d2bb3-e783-4679-9d31-3a86b8e0353d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.077, 0.563, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1146", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_1ca55a08-3c16-407e-b2c5-1c3d15360c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.112, 0.766, 0.131] in the image\nAnd my action is Action: TYPE\nValue: dallas"}]}, {"id": "mind2web_1147", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_05f6ee5e-250d-4770-82a2-0b7e87ff2586.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.234, 0.326, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1148", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_ca514e43-ce9d-4aeb-bd96-9c8fc7f2017d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.103, 0.547, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1149", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_471e7745-222e-40b2-a20f-c65fc40e098a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\n[link] Shots -> CLICK\n[button] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.149, 0.852, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1150", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_08878507-5684-4a12-a316-4ce18a2fbf55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.195, 0.804, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1151", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4a005a1-d2ff-4628-80b4-310e149d0585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... 
-> CLICK\n[gridcell] 28 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.346, 0.93, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1152", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_00bb11f2-1f7d-49f5-a15a-5bc24bc5dd4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.064, 0.327, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1153", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_885aca06-fcca-4f82-b060-1578409d7c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: San Francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.198, 0.247, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1154", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_77c307df-b1ab-41f3-b616-1e19acd5cd98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.064, 0.206, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1155", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_101d08fa-ab8d-4d48-8827-10b75525a40c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.121, 0.777, 0.145] in the image\nAnd my action is Action: TYPE\nValue: Johnson"}]}, {"id": "mind2web_1156", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_63149fa6-84de-46e5-a6aa-e8eed68cd23d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.226, 0.278, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1157", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_e56baebb-6877-4766-9a61-6f73fafacb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.078, 0.646, 0.105] in the image\nAnd my action is Action: TYPE\nValue: kinect camera"}]}, {"id": "mind2web_1158", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_3d8400a4-ec58-48bb-a45d-9d8bef993fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.409, 0.307, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1159", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_0c9a515f-1917-4832-8e49-d33f76581263.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.199, 0.215, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1160", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_491d1d9c-fb28-4878-a568-757c0b80241b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.315, 0.195, 0.338] in the image\nAnd my action is Action: TYPE\nValue: 12"}]}, {"id": "mind2web_1161", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_10392ce8-9a90-4bbb-8106-e627c22465da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.27, 0.627, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1162", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_31782c9f-b77c-46ce-bee1-4ee1d7199cfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.218, 0.712, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1163", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_f9942007-cb07-4f7a-a597-4280403e62d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.024, 0.243, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1164", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_7f0414c5-2299-4fb1-9b20-cbc1cdf35486.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.667, 0.512, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1165", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_30cd750e-6560-4cc9-ac1a-4bbce8e6444e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.425, 0.323, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1166", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_643a83b8-2d11-4001-a1b6-d5ad0fe22f89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.173, 0.397, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1167", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_d154d215-a2c1-4ca9-8aec-a8f047a361da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK\n[link] Cropped Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.099, 0.959, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1168", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_8183ac39-c17d-48ca-9e6c-5cc6db13667d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100\n[button] Submit price range -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.149, 0.905, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1169", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_00e931ae-8251-49ff-9ac5-8409c46d5204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK\n[button] View Cart & Checkout -> CLICK\n[textbox] Add Kohl's Cash or Coupons -> TYPE: FREESHIP3093"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.541, 0.529, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1170", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_97cc385a-366e-4733-8afc-54b6ceb584b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.071, 0.387, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1171", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_bcc99912-98b5-4458-b057-a3f9c7aa4391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.211, 0.695, 0.246] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_1172", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_931c4775-68b2-4411-a8d8-57c3b4f6cb64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK\n[button] Connections \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.641, 0.292, 0.875, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1173", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_14bd050c-7014-4023-8da9-9c0b2974c571.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Hiking Footwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.597, 0.142, 0.61] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1174", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_701e1555-43d2-4dbb-86a8-308404d496a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[div] 2 -> CLICK\n[button] Search -> CLICK\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.042, 0.899, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1175", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_54555413-eb7c-40b7-8f49-d78f658e881b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.336, 0.318, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1176", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_cb72c5b6-7bc1-40f4-84a0-264b0de8d2bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.414, 0.267, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1177", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_549a5ad5-d37c-4180-8797-01abf12af15c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.264, 0.733, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1178", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_20c42e23-c938-4889-8b06-e59438c1e794.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.198, 0.375, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1179", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2f8b7429-46da-4860-82ef-dbfe229fcf9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.123, 0.734, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1180", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_a297de95-784f-429c-9ff7-b987f1cbcbef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.832, 0.089, 0.842] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1181", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f1a04b25-a0cc-4bfa-bf18-7862f7ba3700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.239, 0.376, 0.261] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_1182", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_6876130c-d667-4051-a398-95e5cba6f1e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.104, 0.581, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1183", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_a91196cb-b774-4575-a1f8-0d09f1aba6b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.329, 0.882, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1184", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_b0060309-c2c0-4df1-b25b-a0246d005187.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.304, 0.566, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1185", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_ac7fadf0-dae5-47ce-b122-a54664a3566f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.141, 0.397, 0.175] in the image\nAnd my action is Action: TYPE\nValue: Birmingham"}]}, {"id": "mind2web_1186", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8811440d-7710-4542-87cd-217dbe94a7b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Tomatometer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.463, 0.794, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1187", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_c89800b3-f7c9-4862-aece-bdb8b5e50736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.45, 0.304, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1188", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e49fc4a5-1f43-41a9-9d9a-ec9b5a65e2b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.521, 0.349, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1189", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_224a759b-aa9a-4cee-8cea-36a955e2ce76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK\n[textbox] KOA Rewards Number -> TYPE: 1000000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.431, 0.926, 0.466] in the image\nAnd my action is Action: TYPE\nValue: 10023"}]}, {"id": "mind2web_1190", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_c217d9bf-cb78-42fc-97ab-8e7d362b796c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\n[button] Prescriptions -> CLICK\n[link] Pharmacy & Health Rewards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.162, 0.418, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1191", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_14bb477c-5382-4aa5-9c10-767f73d2e3ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK\n[button] Release Date -> CLICK\n[button] View Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.826, 0.466, 0.845] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1192", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_48938eb1-60ff-48ac-880b-4ffac70ac2ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.72, 0.909, 0.729] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1193", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_36910aa0-a074-4234-955b-a3d43e59bdc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.174, 0.981, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1194", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cfb5072a-1eb9-4da0-8515-843dba96f9d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.241, 0.613, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1195", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_bd1ad30c-c61c-4dc2-8445-1e2d605ca95c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.007, 0.686, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1196", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_bc59266c-98c2-4d58-8b55-2df5b754a1e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.298, 0.163, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1197", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_5b4418c5-688a-41d9-8de6-0552c58d18ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.081, 0.233, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1198", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_86075eb3-e1d9-44a3-899b-abb27b2a899d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.26, 0.71, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1199", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_a68ce23a-54c0-4a20-bdf2-c64c60b7db33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.201, 0.782, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1200", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_438a3200-eaac-441b-b9c8-6940fd697362.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK\n[link] Developer/ Senior Developer, IT -> CLICK\n[spinbutton] Select how often (in days) to receive an alert: -> TYPE: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.216, 0.463, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1201", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_55d1b23e-c4cd-4459-8ecc-fc8db8334fb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK\n[link] Opel Insignia\u00a0or Similar , View deal -> CLICK\n[radio] Radio yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.7, 0.93, 0.748] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1202", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_062a6bbc-b371-4d55-9970-603857dd185b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] To Autocomplete selection. 
Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK\n[gridcell] 17 April 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.494, 0.94, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1203", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_e12856be-7e2c-4628-a1a4-9e78e1ca3e35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.374, 0.918, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1204", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_a6121a43-e23f-421c-ad8a-ec637cb2e49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.014, 0.546, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1205", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_245b8385-23b8-4570-b928-1c9e54526995.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.45, 0.271, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1206", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_85ba8675-1e8b-485c-a4ac-87ead92a45a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.192, 0.599, 0.218] in the image\nAnd my action is Action: TYPE\nValue: 90012"}]}, {"id": "mind2web_1207", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_171cf048-50dc-47a6-90ed-3eb5fa533fc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.393, 0.958, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1208", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_6f344629-c4fb-4980-b926-4ea947c17609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.114, 0.627, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1209", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_ce167469-0673-4a2b-824a-db6bb26f2912.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.061, 0.228, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1210", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_52367d47-a04c-4db0-94f4-b1525d6e4db1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.408, 0.562, 0.452] in the image\nAnd my action is Action: SELECT\nValue: Business or First"}]}, {"id": "mind2web_1211", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_5bb23b9f-783a-4b44-8439-6703dd7bf340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.186, 0.424, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1212", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_d071fca1-24f8-460f-8e33-f6d24a8f651e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.683, 0.631, 0.716] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_1213", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_c9626d97-8b52-49bd-80e3-6490a55642f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.151, 0.32, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1214", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_3f8cd6ae-eaec-4079-b5b1-d39b0b9c8903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.187, 0.295, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1215", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_9498722d-5902-4a3b-9128-22ab274da505.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.001, 0.417, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1216", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fc51fd06-6764-4183-9c13-c4e78867ba63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.389, 0.492, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1217", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c4d77221-301b-48fe-8061-d291c303317d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.458, 0.238, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1218", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_2bbc5313-c847-4bd1-8db4-1fa235c134ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.444, 0.748, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1219", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_04f7c7bb-0def-4780-aea6-e6171f06625a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.354, 0.195, 0.38] in the image\nAnd my action is Action: TYPE\nValue: 2"}]}, {"id": "mind2web_1220", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_3f581127-3bd7-4965-8787-13548d03385c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: musical instruments"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.174, 0.643, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1221", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_74f2b513-2ee2-469b-a2de-837034b739e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.033, 0.937, 0.053] in the image\nAnd my action is Action: TYPE\nValue: jaguar xf"}]}, {"id": "mind2web_1222", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_9a4a0f84-7a55-4cca-bf4b-0c044967ed41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.801, 0.493, 0.92, 0.514] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_1223", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8c5d4102-d5ae-4d01-8751-7c37609f5fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.485, 0.487, 0.496] in the image\nAnd my action is Action: TYPE\nValue: Honolulu"}]}, {"id": "mind2web_1224", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_baa133fa-49f8-4b65-b96b-d529f98ac029.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.417, 0.216, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1225", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_371d79e1-816f-4ac1-b567-3373e6257e51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.323, 0.194, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1226", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_ac723ff9-25c7-4256-a703-4498b7baaad3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.116, 0.278, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1227", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_627fa5b3-5e4b-4b8b-aaf7-6fc28b256a15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.348, 0.966, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1228", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_f7a5ee05-b056-4d74-8dbe-1a6ea359f004.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.691, 0.691, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1229", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_ea4f9b84-8386-40f7-821b-26aabdb914d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.237, 0.335, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1230", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_711a103f-e023-47d5-bba6-84481d512f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK\n[textbox] Search by game -> TYPE: Dota 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.307, 0.992, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1231", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_9ee68218-54fe-4eea-80ad-dbc710aff87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.058, 0.174, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1232", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_2f18d9ac-7e3d-47eb-a590-dfe4ec702343.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.291, 0.116, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1233", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_20ae08b1-640a-41f8-9af6-9b29da52578e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.237, 0.31, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1234", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_d59e9047-3e81-43c4-832c-0513a9f41954.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.254, 0.927, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1235", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_3aa74cd2-39e2-4618-92d3-1f4de5170032.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.361, 0.291, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1236", "image": {"bytes": "", "path": "./images/453da07e-cb2b-4f05-80c5-5b3bc6413086_7e109022-22a1-45b2-9942-b053f85b89bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me FAQs related to eating and drinks\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.056, 0.286, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1237", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_89927f7f-c3a1-4274-b1eb-a8f3086ceddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.229, 0.17, 0.251] in the image\nAnd my action is Action: SELECT\nValue: UEFA Champions League"}]}, {"id": "mind2web_1238", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_c6753f2d-865b-4622-89e9-09d2beb9e602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.062, 0.668, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1239", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_c10a7d0d-2f2f-4def-bba3-816048aa552e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. 
New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.404, 0.203, 0.421] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1240", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_fc21b0b8-0045-44c9-b6ac-423368b4bb4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.199, 0.249, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1241", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_a5faede6-890b-4518-9ff8-94f1cd3d1460.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.27, 0.871, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1242", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_0d47ad52-d333-48b6-9718-abf6fd0dcccf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.428, 0.268, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1243", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_a6f86a41-b433-478a-b445-563cafaebe34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.302, 0.945, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1244", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_f21feaf0-2f36-42c7-8714-70e118a11da6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.027, 0.456, 0.061] in the image\nAnd my action is Action: TYPE\nValue: thai"}]}, {"id": "mind2web_1245", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_f2b4a031-d62e-4f07-9ebc-8b3d9684c116.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.375, 0.755, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1246", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_540327b8-779c-4b6b-8ea9-e4a180265a55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.109, 0.562, 0.148] in the image\nAnd my action is Action: TYPE\nValue: DL145"}]}, {"id": "mind2web_1247", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_4221ed36-d0a6-4821-b352-b9cdb97af2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.304, 0.716, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1248", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_d8ebd628-85a8-41ba-a20b-8d10222703e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.207, 0.837, 0.231] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_1249", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_3e990bc8-0831-405f-89e7-0d2c621e5bb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.306, 0.195, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1250", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_5317b42b-0d04-47ec-ba12-84aab7c9039d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.578, 0.605, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1251", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c02f0195-b85a-4a67-95a2-379936f61b69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.053, 0.267, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1252", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_3fdd479d-2e1e-477d-b8dd-f21c40d2d86f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.205, 0.094, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1253", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_e6a3d740-87e2-4af5-a32e-55478f7813bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.166, 0.446, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1254", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_5e76ed62-0279-4542-a2d8-928980ccbe2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.255, 0.184, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1255", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_bf4e20d6-7a31-4c0c-94b7-1ca00193f3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Cyberpunk 2077"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.221, 0.677, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1256", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_9a5bcd22-5ab4-495e-ab2e-5a5979182205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: belo horizonte"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.312, 0.342, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1257", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9d4a2fe6-8c14-4164-902f-0529d7d9261e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.312, 0.237, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1258", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_636bf503-609b-4c28-9677-2735b7389f07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.186, 0.247, 0.288, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1259", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_3456d1ad-6145-49ba-bef7-cf879d1981f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.474, 0.302, 0.496] in the image\nAnd my action is Action: TYPE\nValue: 01"}]}, {"id": "mind2web_1260", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_dea8c80e-a711-4e0e-9e9c-5ce98849184c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.118, 0.46, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1261", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_096ddbe2-4904-4769-95a4-5f086d977a22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.199, 0.715, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1262", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_e8695bb9-96f7-47f0-8ed8-13a4d78e50d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.07, 0.777, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1263", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_cb4667cf-02bf-48dc-a01b-a81a5c205577.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.43, 0.474, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1264", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_babd2160-a830-4bcf-a262-9729e78664c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.136, 0.492, 0.195] in the image\nAnd my action is Action: TYPE\nValue: Mexico"}]}, {"id": "mind2web_1265", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_162a7c64-4c7e-4540-a732-954d6b4be4d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.18, 0.648, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1266", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_d1ad5da4-0888-4482-9973-f2aace082189.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.377, 0.552, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1267", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_c0eb07c3-268b-4e2a-8db0-666d7d413517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.838, 0.691, 0.873] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1268", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_0fee4950-9755-496f-814f-6f6f5eecd575.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.046, 0.634, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1269", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_db1977c3-e244-4d3b-9ff6-b0b0cc554c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.202, 0.634, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1270", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_4f2fb538-a8c2-4890-a77e-65c8f133c0cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indonesia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds preferred, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.256, 0.393, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1271", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e1baa59a-4622-4d82-9916-a8ab39e36512.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.554, 0.286, 0.591] in the image\nAnd my action is Action: SELECT\nValue: Good"}]}, {"id": "mind2web_1272", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_9f5ae924-5319-4085-9d3d-f0e93305d8b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.222, 0.524, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1273", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f124a0f6-d428-41e6-957d-75863da08b17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.214, 0.808, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1274", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_548c5177-531c-485e-83b1-5c1773bd3068.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\n[button] Savings & Memberships -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.132, 0.638, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1275", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_e16871e0-fd0f-46bd-b7e2-46e8908c39ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK\n[img] Men's UA Surge 3 Running Shoes -> CLICK\n[button] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.795, 0.952, 0.833] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1276", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_47a8b77a-b439-4aae-b55a-dc9989289199.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.092, 0.441, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1277", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_ebbf95db-06c5-4ac5-8355-504f1f77a72a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.204, 0.192, 0.227] in the image\nAnd my action is Action: SELECT\nValue: 2016"}]}, {"id": "mind2web_1278", "image": {"bytes": "", "path": "./images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_05655bed-844e-40ad-8f78-36a2466eb50a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Remove the SSD on my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.014, 0.984, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1279", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_3014cec8-07b7-4224-8737-3260aa0ca81b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\n[link] CITIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.617, 0.349, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1280", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_5762cb31-0f90-4da5-84d3-8fe2f8e95134.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[checkbox] Superhero Sci Fi (745) -> CLICK\n[checkbox] Based On Comic Book (226) -> CLICK\n[strong] IMDb Rating -> CLICK\n[group] IMDb user rating (average) -> SELECT: 7.0\n[group] IMDb user rating (average) -> SELECT: 9.0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.224, 0.182, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1281", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_a4b9f6d3-602d-47c0-bb53-cbc05c2c73a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[link] Amazon Basics 7-Piece Lightweight Microfiber Bed-i... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.49, 0.765, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1282", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_fd544f3c-9154-4db2-9d56-7cc4138fe0b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... -> CLICK\n[textbox] Search by Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.261, 0.688, 0.281] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_1283", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_19d143be-5401-4456-a21c-788e8e6a043b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK\n[combobox] Platform -> SELECT: PS5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.058, 0.737, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1284", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_56836838-03d7-449e-b87f-37ea90bf16fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.301, 0.384, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_1285", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_81fd0300-9ad3-40fa-bec3-798fec6e088d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.856, 0.727, 0.9] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1286", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_77c05c0f-315a-4014-bfcd-4943c731b855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.457, 0.486, 0.483] in the image\nAnd my action is Action: TYPE\nValue: BOSTON NAVY YARD"}]}, {"id": "mind2web_1287", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_6a527941-0214-4124-b97f-4f28e08866a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.064, 0.546, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1288", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_9bcdd47d-1557-4bda-b942-08571a6d3688.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK\n[link] COVID-19 testing Schedule a COVID-19 test -> CLICK\n[textbox] Where do you live? 
(required) -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.266, 0.844, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1289", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6b93ccb8-16b3-41bc-90ca-62441ccb33f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.313, 0.245, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1290", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d0518e79-c097-4a9a-a841-be2f94c1aac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.289, 0.243, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1291", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a356fe26-1097-4344-9d45-c4c9cdaf42a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.843, 0.481, 0.856] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1292", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d71a35ff-906e-400a-95ea-268aec2e265b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.169, 0.35, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1293", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_2258dff5-dc9b-44c6-94f6-629411cc0506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[generic] Your Opinion Counts! 
-> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.482, 0.621, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1294", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_17fddc65-7f90-4e09-ad51-64f7224c3242.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 2023\nPrevious actions:\n[textbox] Where to? -> CLICK\n[button] Nearby -> CLICK\n[gridcell] Thu Mar 30 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.45, 0.78, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1295", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c609672f-a7f7-4d05-bf6d-e0a3beac539e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.083, 0.695, 0.095] in the image\nAnd my action is Action: TYPE\nValue: Top Hip Hop"}]}, {"id": "mind2web_1296", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_acc48811-0d86-4f47-ac69-4ef0073c9d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.14, 0.797, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1297", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_227c3818-5a1d-45fb-b107-14f02fd50a22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.252, 0.318, 0.285] in the image\nAnd my action is Action: TYPE\nValue: albany"}]}, {"id": "mind2web_1298", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f5aeec71-5f34-4bbb-872c-fcf7e73581e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... 
-> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.828, 0.34, 0.861] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1299", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_f5c415d1-2c8e-40e4-bd0f-72fba8cf0fd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.044, 0.644, 0.066] in the image\nAnd my action is Action: TYPE\nValue: iPhone 12 Pro"}]}, {"id": "mind2web_1300", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_f1bd2a11-0430-4a4a-a850-5d4f2a0509bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford\n[div] Abbotsford -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.212, 0.98, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1301", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_f44293d8-7694-4f8b-b54e-b14d572de3db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.366, 0.175, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1302", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_118d364c-60ab-4ee9-bb94-839fad51462c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.03, 0.917, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1303", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_ba02fb70-2e69-4906-bb2b-34f4731545f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.489, 0.759, 0.517] in the image\nAnd my action is Action: TYPE\nValue: Harry Potter Box Set"}]}, {"id": "mind2web_1304", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_81062e4e-eea9-437a-ab50-756bba2cca30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.395, 0.233, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1305", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_05089a33-1242-46b1-add7-bd4eb35abc03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[heading] Continue -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[span] 36 -> CLICK\n[button] 34 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.626, 0.774, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1306", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_a0d0a0dc-e1ef-4efb-8c64-f76f38813865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.14, 0.514, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1307", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_40c13d1e-b12e-400e-8755-60d0c6dd3652.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK\n[div] Sail From -> CLICK\n[button] Miami, FL -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.099, 0.551, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1308", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_0f71aa1b-f0ae-4360-8312-faeac77e1fe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.0, 0.552, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1309", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_1dbf3df0-7bb2-4b4d-bf1b-108692b3387d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK\n[span] Filters -> CLICK\n[listbox] Sort by Price - Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.855, 0.976, 0.896] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1310", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_d64e976c-6174-4f23-80ae-bb2a1af5a5a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.387, 0.194, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1311", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_6c5a2ac8-5f7c-4c89-8151-a9429ef3797b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK\n[textbox] First Name (required) -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.421, 0.833, 0.456] in the image\nAnd my action is Action: TYPE\nValue: Buck"}]}, {"id": "mind2web_1312", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_ec3faf8f-2b87-4367-8e09-3f7977f994e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.046, 0.987, 0.088] in the image\nAnd my action is Action: TYPE\nValue: Alinea"}]}, {"id": "mind2web_1313", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_912cb9c9-bd7e-4716-a337-1d848ad699be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.179, 0.359, 0.199] in the image\nAnd my action is Action: TYPE\nValue: central park zoo"}]}, {"id": "mind2web_1314", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_dd3fda05-b84a-42a1-92dc-f7f60043d557.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK\n[textbox] Max Price -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.435, 0.826, 0.468] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_1315", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_bad1f3ca-f331-49c9-b384-520ef6d972de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.363, 0.285, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1316", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_5f3874c8-9929-49b6-8e63-d7e356a0021c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles\n[span] City -> CLICK\n[div] 21 -> CLICK\n[div] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.139, 0.964, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1317", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_84e40751-b41d-4447-9230-62c763c51494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.157, 0.463, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1318", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_a21b93a5-223b-4203-b8d1-b50e53371daf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.3, 0.686, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1319", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_435c88ad-84e9-40e9-b104-f732917fa6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Rail -> CLICK\n[label] Express Bus -> CLICK\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.208, 0.359, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1320", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_12b43c44-08b2-4054-96c4-4f4b62433e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.161, 0.62, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1321", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_7f5b804a-de4d-431e-af40-f11f88024f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.149, 0.516, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1322", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2d7e4e1f-c4e4-4952-b72d-8578d04e5a20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.014, 0.323, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1323", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_6c97e7b4-b514-4509-9c8f-a7f8f802f56f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK\n[textbox] max price $ -> TYPE: 100\n[div] Shared room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.401, 0.786, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1324", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_6a9eafbb-53ed-43f0-88f7-6282ca1c676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.101, 0.223, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1325", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_5e9517f2-e5c6-4f6e-9dc8-48652fa459f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 100\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.365, 0.701, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1326", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_4f02b46a-27e4-4252-b903-79e909d5cd42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.258, 0.643, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1327", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_0786979a-b7d5-4a76-8b9a-5b24c2ed095b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.903, 0.396, 0.944] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1328", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_20025603-f2db-480a-b623-54c605d29e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.337, 0.802, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1329", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_c9b23ba4-feaa-4d70-b31e-4ab45b0de665.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.242, 0.913, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1330", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_3d184f90-9278-44e1-ba90-4b853b6d57d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.093, 0.414, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1331", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_17cc8fec-b781-48d4-86ab-a842b9ffa5bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.256, 0.691, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1332", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_1a967805-4e68-408d-907b-66a52037ca3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.426, 0.988, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1333", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_790f3994-0282-4c98-a80d-4758ef216776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.474, 0.32, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1334", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_31676c0a-b906-4ef6-a036-5b82635f521a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.603, 0.391, 0.635] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1335", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_fd219d72-5b65-4045-a4f4-04587d1c4cf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.147, 0.432, 0.182] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_1336", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_f187504d-f032-4815-a47d-c44cf137f3aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.507, 0.097, 0.663, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1337", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_652b20f7-ac5f-4df0-a811-8439600ebe0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.185, 0.209, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1338", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_1a65c0c5-0a3f-4937-b236-47abc8727a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.62, 0.777, 0.651] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1339", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_fed84eba-0361-4753-883e-226494372650.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.771, 0.316, 0.797] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1340", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_68bef90c-0ea2-41c8-8e0d-24c2cdbe7b8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.178, 0.843, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1341", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_d601a7c6-57c5-4f10-993f-b6ca0040497d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK\n[input] -> TYPE: KCCR"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.046, 0.576, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1342", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_d19d4129-17c8-4d55-8922-f2e6468c09fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.008, 0.686, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1343", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_6359600f-6012-4e5a-a3bc-26b3faaf6d51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.245, 0.307, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1344", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_6dc4d4a5-0284-47c7-b9b9-123e7e580adc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... -> CLICK\n[link] View All Releases \uedbe -> CLICK\n[combobox] Select year: -> SELECT: 2020"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.336, 0.48, 0.37] in the image\nAnd my action is Action: SELECT\nValue: Press Releases"}]}, {"id": "mind2web_1345", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_76aa8b0d-7681-46b5-983a-c715bbec237c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK\n[button] Continue -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.621, 0.727, 0.766] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1346", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_f057ecbf-37ff-4849-a8bd-52524dbff3d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.195, 0.234, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1347", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_401aaaf4-e63a-4957-ae58-38fab003cb30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.564, 0.118, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1348", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_4ae02d45-a9d7-49e0-b784-860687b59016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.525, 0.941, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1349", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_832003de-d47f-4c28-8581-ee704cc1f19a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK\n[listbox] hour -> SELECT: 17"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.341, 0.327, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1350", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_d86079d3-0dbc-483f-a352-a5b6b204d119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.106, 0.492, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1351", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_1b864842-2093-41bd-9dd5-2a2e967afdf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.063, 0.307, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1352", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_03665eda-4ea5-49b2-b687-66ec30c80b16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.477, 0.645, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1353", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_70eb3436-556f-4fe5-8c18-78852559efc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.136, 0.59, 0.168] in the image\nAnd my action is Action: TYPE\nValue: Dubai"}]}, {"id": "mind2web_1354", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_353b91ca-a6e4-4e6b-9fd1-14a2586a796a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK\n[button] TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.337, 0.639, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1355", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_2f3a086d-426a-4c71-b79b-05865962cb27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.128, 0.366, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1356", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_80303c11-8409-4de5-a1ab-7e724a8c74cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[img] Woods 41366 Surge Protector with Overload Safety F... 
-> CLICK\n[span] Qty: -> CLICK\n[option] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.275, 0.869, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1357", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_2ed285c2-d5c2-47a3-9cab-96af7b698f2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.322, 0.639, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1358", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_78240913-e4ba-488d-af47-988d6dcd7307.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.405, 0.922, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1359", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_4dafe010-a466-41a4-ae48-14b3769fdd36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.163, 0.241, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1360", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_7b525852-d861-4b68-96ac-240e8e78e5e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.077, 0.036, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1361", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_18ad1432-a6eb-46c8-a4d0-b27f35971f9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.832, 0.473, 0.879] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1362", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_14d83651-d626-40c5-bb20-7cafc64a78ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.091, 0.89, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1363", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_f84b368c-5c44-41ff-be11-271ea329cfbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.394, 0.777, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1364", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_12a902e3-b65c-4644-86f6-53b76223606d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.326, 0.891, 0.365] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_1365", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_176a7cf8-69c3-47c4-8090-30c9a98a3633.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK\n[link] Unique Experiences -> CLICK\n[label] Up to 1 hour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.575, 0.236, 0.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1366", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_cc1f3832-fb78-4fdb-a6a4-6ecd4c37c716.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.507, 0.521, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1367", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_3c7c8607-1c94-490a-b1aa-7d545fba0376.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.132, 0.263, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1368", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_f2d2e650-eea6-4670-b758-ee55649c1e07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.216, 0.359, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1369", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_24909b46-0dde-4b06-8dbc-150212c5eb23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.662, 0.192, 0.67] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1370", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_389c1fc4-5e1d-487d-8791-0f6f32b1a1a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.474, 0.546, 0.535] in the image\nAnd my action is Action: SELECT\nValue: 4 guests"}]}, {"id": "mind2web_1371", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_d5d06797-e73f-4063-8807-2792fae51cf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.048, 0.72, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1372", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_fbe93f95-b79d-4208-94b5-baa35be519f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.112, 0.045, 0.205, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1373", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_c5d34636-ac2c-4afa-bc53-ca501dba2c63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.146, 0.155, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1374", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_49b6818c-3f34-49f1-ba58-9cba952646ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.28, 0.153, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1375", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_6f4fdbe1-0c56-424c-9df6-b84d8876fc21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK\n[link] Outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.701, 0.233, 0.747] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1376", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_9d559cfa-a819-4c5b-8d50-446d5a0538d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.253, 0.492, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1377", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_70e3d5b7-ec0e-4e31-ab82-ab367b0aa9f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.145, 0.256, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1378", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_92faefb0-d3d9-46ed-a6e1-200c685e21b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.153, 0.429, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1379", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_1d159c3f-a3f0-41a4-a733-ea456f96c507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.096, 0.964, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1380", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_ac1ad2da-eeb2-4030-a592-fdf3c8a5c97f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.368, 0.783, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1381", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_f63f307f-8cac-4289-9eb1-bfed085a6f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK\n[img] Image for Red Hot Chili Peppers 2023 Tour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.209, 0.372, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1382", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a4d1e68e-8bb3-42fb-a386-74798f3660b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK\n[input] -> CLICK\n[span] -> CLICK\n[button] Find Your Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.309, 0.915, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1383", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_89b46503-23ad-4b12-8aa0-132496a675ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 0.846, 0.479, 0.857] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1384", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_439a6ca4-c0e8-4e02-8b9c-37632fdbf3d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.251, 0.443, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1385", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_9ac267ec-b83a-4ee9-9fba-e0beedd3f174.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK\n[option] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.347, 0.906, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1386", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_76efe3f4-a2cc-4414-bd56-7ba6012a68a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.232, 0.567, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1387", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_24a16929-1568-40b1-b407-013d6d8b9107.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.09, 0.554, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1388", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_4f5d1bb3-55ec-4b13-a0fd-8c243591f074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.493, 0.359, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1389", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_499639da-7d48-4aef-a2f6-2f0b1cda21b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.058, 0.646, 0.078] in the image\nAnd my action is Action: TYPE\nValue: playstation gift card $10"}]}, {"id": "mind2web_1390", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1d6a6c4d-7dec-47fe-a26d-a596304c0ef2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... 
-> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.367, 0.868, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1391", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_08676b6f-dfe9-4f7c-acb9-b85f4e91123c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.069, 0.615, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1392", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_6eacf7f1-bfcf-4dad-9660-fda396a4b150.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.007, 0.553, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1393", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_de29d2ea-22c4-4b25-ac58-063235e2f9c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... -> TYPE: 12345678\n[textbox] First name -> TYPE: Jason\n[textbox] Last name -> TYPE: Two"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.326, 0.271, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1394", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2f6c50ae-f7f9-4cb1-bf09-185df432382f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK\n[radio] $100 to $200 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.37, 0.123, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1395", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_1be05128-edbd-43c1-b205-2923b0b3b75d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? 
-> CLICK\n[link] Read more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.364, 0.218, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1396", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_abb7ad2b-5603-4bd9-995c-e98a5ad82e57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar\n[button] CONTINUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.427, 0.293, 0.459, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1397", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_20b8aff4-4269-4cf8-a6a7-d1232bab53a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.161, 0.984, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1398", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_d5d9dc65-d2d7-40a9-bffc-3a9bf35a0050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.294, 0.209, 0.325] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_1399", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_6a59bf9d-e498-42ff-9361-27f824894bd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.624, 0.315, 0.658, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1400", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_4124e048-0c44-4b5f-b3f9-a449d3e18de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK\n[button] 25 -> CLICK\n[combobox] Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.302, 0.894, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1401", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_985d0c82-e934-4e69-9a1a-7e7097fb4c00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.279, 0.196, 0.303] in the image\nAnd my action is Action: SELECT\nValue: US$20 to US$40"}]}, {"id": "mind2web_1402", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4ef64b7b-5543-41f0-87b8-90c79cb7aa92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.319, 1.0, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1403", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_f35b1ee4-b294-4c01-9578-db54c94efe46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.141, 0.127, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1404", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_7222bd6f-e476-48ce-8e0c-01637c662281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.29, 0.483, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1405", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_a603d9a4-649e-4daf-8218-cba78f032b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.032, 0.567, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1406", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_29e613e0-4f71-4265-8c2c-7ecf158499b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.434, 0.285, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1407", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_e1e4c60c-9081-43ac-96e0-4b8e5bd6003d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.063, 0.342, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1408", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_cb7b348f-dc7d-4c76-b1a9-0fc02a87f46a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds\n[div] Alien Worlds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.139, 0.189, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1409", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_771dcd89-5187-4dbd-bcac-6e4ea751b409.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... 
-> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.665, 0.277, 0.68] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1410", "image": {"bytes": "", "path": "./images/bbfed209-df70-434a-aece-5c5fc7a38f4f_c15c9c63-aa0b-4ae5-82c1-a5df5c474ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the push notification settings\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.409, 0.241, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1411", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_52148af4-19e2-4ea7-be70-40c779c314bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.159, 0.713, 0.177] in the image\nAnd my action is Action: TYPE\nValue: Paris Charles de Gaulle Airport (CDG)"}]}, {"id": "mind2web_1412", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_d17cc61b-2cdc-4948-91c9-e58d1f9311b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow\n[span] Glasgow Central -> CLICK\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.279, 0.412, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1413", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_d71602fb-ca32-4910-9360-a5684b83ea91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\n[link] Navigate to deals -> CLICK\n[link] Navigate to 1-Night Kiosk Rentals See More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.22, 0.333, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1414", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_ad58eba1-5d2a-4f85-905a-6d2d0b1312ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.485, 0.48, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1415", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_d420750c-23ce-43f7-8f39-d31b623dddde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA\n[input] -> TYPE: Walker\n[input] -> TYPE: A987654"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.258, 0.585, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1416", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_2f9858fb-e872-4568-b349-3391628deb20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York City\n[li] New York City, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.441, 0.397, 0.454] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_1417", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_ab6a19ab-94cb-4b50-a231-3ec9df28c9b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.737, 0.435, 0.773] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1418", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ce2b117c-d60b-4135-9f3e-406a601c7028.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.722, 0.273, 0.762] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1419", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_85141338-8cd2-4b4d-9f60-9cea25beadb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: Glasgow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.107, 0.326, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1420", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_8e64a305-417f-4f93-b0c4-ae588b41194e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. -> CLICK\n[img] Short Socks (2 Pairs) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.488, 0.248, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1421", "image": {"bytes": "", "path": "./images/fce75183-0825-42b1-baf3-a9214fe20ce9_36269d09-9e56-4e12-ac33-a0ac39b4a53c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse recent NFL-related videos.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.17, 0.621, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1422", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_92587b64-ad6e-4e8f-8c27-feeff12b79d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.118, 0.481, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1423", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_89ea3bea-b9d9-4381-8184-5c66df6cd0ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.01, 0.232, 0.016] in the image\nAnd my action is Action: TYPE\nValue: spa"}]}, {"id": "mind2web_1424", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_34accc8c-406e-4136-8c51-c2b1edb1654a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.273, 0.963, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1425", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_fd4064b1-988a-4940-9578-6fbfbfc2f352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.043, 0.426, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1426", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_d9976c1f-bf18-4f5f-abd6-fb7592c0622c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK\n[label] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.839, 0.223, 0.85] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1427", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_ca8b04e6-e90e-436c-84fa-b5af56223c3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK\n[span] Plastic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.297, 0.169, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1428", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_b740c1d3-669f-45ae-beed-936d5f4e4f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.02, 0.469, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1429", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_ff47cd42-40a5-47a6-8b52-589ca150f520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.137, 0.587, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1430", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_9174deb2-b1a0-46a7-b79b-8265bbbad507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK\n[span] Toll Rates -> CLICK\n[span] Motorcycles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.262, 0.867, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1431", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_38d43410-d666-4a82-8c4c-514bd2c40a0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Under Armour Men's Charged Assert 9 Running Shoe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.404, 0.549, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1432", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_146406e2-e394-4d53-8e79-f20f4d7c3df0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.005, 0.371, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1433", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_76b4419e-d497-4831-8074-447ca32328fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK\n[div] This weekend -> CLICK\n[p] Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.121, 0.242, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1434", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7d4790d6-486f-4152-a56d-6ec08c11b626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.346, 0.144, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1435", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_4bf4e3a7-8e4d-4453-9bdb-3f68faa7feb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.077, 0.673, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1436", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_8eb4a4cc-f4f6-4cdd-979a-3eb10b5742a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.01, 0.232, 0.017] in the image\nAnd my action is Action: TYPE\nValue: Burgers"}]}, {"id": "mind2web_1437", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_3458c954-87c8-4d5d-848d-2b9041fd5ef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK\n[link] Under 2'x3' (38) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.259, 0.986, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1438", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_ee8846f3-42cd-4257-9f4e-011b4079bf1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.236, 0.744, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1439", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_2696e6cb-e2fc-45da-b9e5-33fe50d21113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK\n[link] Accessibility -> CLICK\n[div] List of Accessible Stations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.449, 0.403, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1440", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_906d43ef-49fe-40c0-b676-17bf6a6c7cc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK\n[link] History -> CLICK\n[button] Clear all history -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.158, 0.684, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1441", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_18953029-7de1-4f5f-bbfd-47497151e78d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.746, 0.094, 0.764] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1442", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_85d89f31-f66f-4dea-9d27-26cffeb6b2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.012, 0.988, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1443", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2355acbf-015a-411b-9255-66eb6a6ea664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.12, 0.783, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1444", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d1b5e61a-25a8-4b5a-8797-51292027172a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.811, 0.371, 0.817, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1445", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_f9125541-f6f0-462e-bbd6-74b95fa0141e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.432, 0.492, 0.449] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_1446", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_041c26fa-ce1d-486c-ac07-f01db497d492.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Audience score -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Tomatometer -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.388, 0.798, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1447", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_7fabb89e-f2fd-419c-b2b1-bf792b60efff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.201, 0.902, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1448", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3e6b66ed-8220-4c62-8a25-6d6f0815a83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... 
-> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.196, 0.71, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1449", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_49ca6092-7a1c-4313-9a93-16fd2713cbb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.122, 0.269, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1450", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_640f1bc7-d87d-4abd-b427-251868d68256.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.114, 0.702, 0.141] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_1451", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_b1cdb3df-d1c0-4a37-966f-b0f460a30a65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.073, 0.587, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1452", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_8b608f62-98ad-4a30-98fb-39c4d74a95d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... -> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK\n[button] View Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.237, 0.394, 0.254] in the image\nAnd my action is Action: TYPE\nValue: FREESHIP3093"}]}, {"id": "mind2web_1453", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_5ae1626b-8e2b-4bb4-be5c-4488a2121063.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.574, 0.038, 0.596, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1454", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_9d90cf4a-97bd-4b59-a600-b1a420139626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.1, 0.961, 0.135] in the image\nAnd my action is Action: TYPE\nValue: 11231"}]}, {"id": "mind2web_1455", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_8050d3d0-fb77-42bf-bbe4-77c3f358a46b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.273, 0.459, 0.299] in the image\nAnd my action is Action: SELECT\nValue: New Jersey"}]}, {"id": "mind2web_1456", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_01991012-99bb-43ba-80b3-8761e12526b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.303, 0.712, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1457", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_f455e88f-ed98-4077-b0cb-ea5f32a69743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.088, 0.646, 0.119] in the image\nAnd my action is Action: TYPE\nValue: xbox 360 games"}]}, {"id": "mind2web_1458", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a3ce4ba4-e949-4d3f-961a-e0ab53de1539.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... 
-> CLICK\n[link] Go to route -> CLICK\n[button] City Point -> CLICK\n[menuitem] City Point typical route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.494, 0.393, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1459", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_fd110d4a-c93d-432a-860b-76873aeb4d0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.118, 0.812, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1460", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_1d0652c6-8c34-4087-aea7-e41d19eea42e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK\n[link] Recommendations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.17, 0.565, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1461", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_f2c74424-c791-46ac-8ed3-080e0b523ebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.131, 0.902, 0.187] in the image\nAnd my action is Action: TYPE\nValue: Spain"}]}, {"id": "mind2web_1462", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_cb589bb8-5110-40b5-9fa6-b42683918b39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.115, 0.123, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1463", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_e5dc29b7-feed-4ca1-addd-d63034be1d36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.058, 0.518, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1464", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_c83aa179-2b1b-4f4b-8d0e-714e90cb8743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.296, 0.278, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1465", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_cbbfb5cf-8c1a-47fc-a015-afaa2567bbf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.837, 0.03, 0.897, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1466", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_51eb2c1d-08f8-4f21-92f2-c17c6de966de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.16, 0.498, 0.185] in the image\nAnd my action is Action: TYPE\nValue: 123 Main St, West Chicago, IL"}]}, {"id": "mind2web_1467", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_1cc8fab2-5512-4790-95ae-8349beb1f6f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.354, 0.366, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1468", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_4f12982c-1b2f-42b8-9391-dd4b17ff0ced.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.04, 0.665, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1469", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_b36f6cf0-ceff-4a01-bc4e-bf8ffa893d48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.142, 0.45, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1470", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_23bec80e-fc2a-4ca2-afa5-ea11e0911edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.217, 0.433, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1471", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_94853346-3f50-42d4-a572-19b457b58ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.221, 0.254, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1472", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_6ae1d041-2f14-47cd-8056-e5167cde24a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.067, 0.326, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1473", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8975d7cf-935f-4d95-aa36-5eb71e2b01eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.279, 0.173, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1474", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_eaebc513-fd13-45da-8a09-78a30eb928d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK\n[link] Scores -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.271, 0.717, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1475", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_7ebdc5db-d931-4a68-a6f5-cb6976b12702.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.41, 0.452, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1476", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_c6b4511e-d878-4799-9d4e-4cbac9de4c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.402, 0.592, 0.442] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_1477", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_1bb0ea8b-610d-45e5-b23d-ee1bad6369c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[span] Dining package -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.78, 0.713, 0.888] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1478", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_3cd13063-08bf-498b-92ed-c690490a1526.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.642, 0.263, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1479", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_90db0fb9-3240-4db9-bd1f-ee23ceb7ea76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.146, 0.695, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1480", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_69956295-9900-40e8-b095-abf025c73dc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.103, 0.472, 0.133] in the image\nAnd my action is Action: SELECT\nValue: Kia"}]}, {"id": "mind2web_1481", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_b74474d1-d37d-4bc9-9fb5-5d6a91112763.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfield, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.176, 0.241, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1482", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_61593538-6a94-428e-a354-f8b3da1bde5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.357, 0.605, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1483", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_a414ba1e-c263-4dcf-865b-04054f9e18aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.387, 0.129, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1484", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_7693c6c7-4388-417a-acdc-d0ad1937dec5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? 
-> TYPE: GOA\n[div] Goa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.172, 0.734, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1485", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_b368cde5-79cc-412f-89d5-579b80c8db94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.432, 0.252, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1486", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_82ba2b10-839f-4716-b42e-af904b9b0c04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.276, 0.47, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1487", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_c44a1e52-643c-4487-b25d-dedd81984892.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.351, 0.329, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1488", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_5b52c0ec-73b3-41b6-b34a-b0882e65cbfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[textbox] Credit card number -> TYPE: 123456789\n[combobox] Expiration month \u00a0* -> TYPE: 01\n[combobox] Expiration year \u00a0* -> SELECT: 2024"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.394, 0.302, 0.422] in the image\nAnd my action is Action: TYPE\nValue: 123"}]}, {"id": "mind2web_1489", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_7b4101fe-ef0c-4517-9e11-be6a982e764e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.062, 0.494, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1490", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_ba56712e-4801-424b-857e-fb64ab1a9307.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.448, 0.648, 0.485] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_1491", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_86f31177-b948-43d1-bfb7-32166655b35a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\n[span] Help -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.079, 0.756, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1492", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_4da78b8c-50b7-4b50-8e3c-bad054eacd2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.357, 0.553, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1493", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_ce91ca6f-c166-4451-aec6-a9f75ea23165.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.146, 0.969, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1494", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_729f0caa-03f2-413f-a4f0-c17602d24653.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.459, 0.536, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1495", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_b2516cbd-9d43-4c25-b114-d8edc450e95c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.381, 0.915, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1496", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_c9e53d13-c6fc-4af4-b8c4-45ea969dd04a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.084, 0.808, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1497", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7b712fcd-3dcd-44a1-a57b-e574ddb56109.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.282, 0.78, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1498", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_103014cb-e53e-4b52-84d3-7653842690ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.074, 0.492, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1499", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_d92ae16a-3126-4997-ab0e-125dd2416c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.183, 0.205, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1500", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_ddec100d-f916-4321-ba6a-dceb8f48e51c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.007, 0.553, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1501", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_c6b0f017-31fc-4a65-880b-30d19b72e561.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.252, 0.675, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1502", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_ff85d6b0-e760-47c9-9cbe-0b2c40ea369f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)\n[combobox] Term Length -> SELECT: 48 months"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.214, 0.835, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1503", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_74dc5d68-9acc-4106-936e-cba53c2782cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.004, 0.371, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1504", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_0aeafd95-fef4-4d22-aaea-cd873ef8fd5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.515, 0.535, 0.526] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1505", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_3260885d-20b9-4daf-9f4b-1a95e0a6a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.362, 0.305, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1506", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_2da87719-cf27-463c-859e-44538f0428bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.15, 0.316, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1507", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_8031c316-8f33-49ca-85a3-4f274aac7fb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fastest flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.148, 0.354, 0.17] in the image\nAnd my action is Action: TYPE\nValue: TOKYO"}]}, {"id": "mind2web_1508", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_949e1e48-94d8-4d69-aa74-24e5582011bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.056, 0.664, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1509", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_c31b9357-ee70-4f66-974d-647feb53a5da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.012, 0.858, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1510", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_dbb99d1d-77d3-4826-a887-2bee8e5bf43c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.084, 0.123, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1511", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3851cdfd-1081-4f86-b05d-6062a054e094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.437, 0.33, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1512", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_ef69e26b-3544-4e04-95cb-b382313130d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.118, 0.321, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1513", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_8baa6a27-c421-47ea-9ad4-efeeba6e1815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[span] Reservations -> CLICK\n[textbox] Near -> TYPE: SAN FRANCISCO\n[span] San Francisco -> CLICK\n[checkbox] Cocktail Bars -> CLICK\n[checkbox] Outdoor Seating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.119, 0.635, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1514", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_bc2c2145-162c-46a9-8bfd-32e070aa3cb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.163, 0.702, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1515", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_4b2dcd3a-0b40-469e-845d-e7b4f050d030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.133, 0.244, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1516", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_38d7f841-8b1f-4c5b-8131-310960d438fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.096, 0.628, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1517", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_e835b0d2-5db9-498a-81f5-598bb3d144c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.339, 0.263, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1518", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_d834a522-37a2-4ae0-88eb-0d4490a2d956.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.31, 0.07, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1519", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_23a4d8ba-846f-4a6b-9d5e-8e9059bbd4be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.431, 0.518, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1520", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_118b28dd-087b-45f4-8490-baa847a291ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.888, 0.18, 0.959, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1521", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_39a3a8d8-dbe1-4949-8d5f-a9097d58c4fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.021, 0.409, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1522", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_f9b97955-f20c-41ea-83d4-8b3c274cb9f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.197, 0.44, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1523", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_8bcadcd3-882c-4c95-a59f-121cd8e75eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.139, 0.384, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1524", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_ffaf6b03-f8a7-4d88-b8b3-e95d8ac0b97a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.077, 0.987, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1525", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6c235a86-4748-4b46-bb48-86f39329f0e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.123, 0.838, 0.147] in the image\nAnd my action is Action: TYPE\nValue: 5"}]}, {"id": "mind2web_1526", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_3b99d7fa-1730-4fcd-86c1-d5244bc75520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. -> CLICK\n[link] Gallery View -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.339, 0.4, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1527", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_227dfbfe-6b1f-473c-b87d-101c9dfd7306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.145, 0.891, 0.173] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_1528", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_7d5af9ff-4e09-4a12-a7aa-870797c20fcb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.069, 0.582, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1529", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_961171c8-d057-442d-9bd2-68aa64900442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\n[combobox] Search products and services -> TYPE: dayquil\n[button] Submit search -> CLICK\n[div] Vicks DayQuil SEVERE Cough, Cold & Flu Relief, 24 ... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.567, 0.362, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1530", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_fd4aace9-856c-4933-a18f-8817c81c926b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK\n[span] Digital -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.159, 0.825, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1531", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_c8f29a0e-e477-43e2-a3c5-2895cac22164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.5, 0.459, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1532", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_29445a41-8ff2-4bff-b2fa-3f892a59bbf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.657, 0.004, 0.761, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1533", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_ac2708fd-5705-4b77-8cf4-684e0e121f2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.292, 0.463, 0.302] in the image\nAnd my action is Action: TYPE\nValue: Dubai"}]}, {"id": "mind2web_1534", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_0360e768-e23e-4a3f-8e45-956e24c36c5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.351, 0.985, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1535", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_5f4a93f4-ae47-4f5c-b7ba-c9ccc2463d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.152, 0.517, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1536", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_e672e49c-5049-4b40-b486-40028a26aa99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple\n[span] Kashi Vishwanath Temple, Varanasi, Uttar Pradesh, ... -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.177, 0.571, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1537", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_2ec82da8-e3d5-4d54-a618-84c72889c172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.837, 0.029, 0.897, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1538", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_678dddb3-2699-4a17-9a70-578517f9260f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.641, 0.165, 0.655] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1539", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_941c96f6-80a7-4f83-bcdd-a4c57106ddc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1540", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_427fc741-820d-4659-b46e-ba46fa397047.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.255, 0.285, 0.301] in the image\nAnd my action is Action: TYPE\nValue: 1234567890123"}]}, {"id": "mind2web_1541", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_3e7c0282-8e70-4f28-af2d-3f6c13c55221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.264, 0.223, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1542", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_685cf19d-39cf-483b-9d1d-0b664f121910.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.331, 0.21, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1543", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_697b485d-dfb7-4825-8e41-6d0fffe55a76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.446, 0.05, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1544", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_09dcc085-8dd6-4a59-89f8-c14cb6266555.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.037, 0.821, 0.06] in the image\nAnd my action is Action: TYPE\nValue: organic dog food"}]}, {"id": "mind2web_1545", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_61d45c64-e415-47a6-b881-54e29f1cdc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... 
-> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.085, 0.643, 0.101] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_1546", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_4d38e663-23c7-43da-9cf4-3667c1872ff3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.548, 0.339, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1547", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_ba02b1c4-6cdb-4c43-8f62-e7fbe49c6a90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.527, 0.455, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1548", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_078ff56d-4223-448a-a76d-bd8b5b747c95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue\n[textbox] * Required Fields House Number -> TYPE: 4847\n[textbox] * Required Fields ZIP Code * Required Fields Zip -> TYPE: 10019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.424, 0.476, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1549", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a2193e39-2058-4b6c-bfaa-6395e17662a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.201, 0.591, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1550", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_de372882-c37b-4261-a0b7-a234e29456c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.314, 0.616, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1551", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_c6dad653-0f55-4f00-83a7-7ac9cffc7316.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.124, 0.469, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1552", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6e1208e-d16a-437b-aa5e-4bce43b335f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.216, 0.427, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1553", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_bda37da7-1fef-4ff8-9174-51730582abd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.271, 0.434, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1554", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_c8ff6366-a117-4e3d-8a22-0c74ccf24360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[button] Listing options selector. List View selected. -> CLICK\n[link] Gallery View -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.182, 0.774, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1555", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_55888df7-c6b9-4ab0-8f38-d6970f7a3025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.359, 0.441, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1556", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_a5cac325-2527-4236-9fb6-6ecbfbd52c58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.302, 0.514, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1557", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_191f5324-8b5a-4b2d-aecd-47c7c053d9e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.267, 0.831, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1558", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_103a605a-1392-4f31-bc96-5a2f561540da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. -> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.077, 0.894, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1559", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1741ce47-1b26-41c1-9827-b8393e3dfab3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.198, 0.691, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1560", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_48468ad4-f291-419b-9089-9ca9d1ffd9da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK\n[label] 4 -> CLICK\n[span] -> CLICK\n[div] Lower -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.922, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1561", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_33c00f88-b578-4fac-9e0c-a9a0520fc6f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.06, 0.492, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1562", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ffc28c0e-9243-45a9-9470-4669e5a310e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.167, 0.788, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1563", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3e652678-e0e8-49b3-9954-f5076aa0631e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK\n[link] Hammocks -> CLICK\n[link] Camping Hammocks (21) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.248, 0.428, 0.27] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_1564", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_29373ca9-75bb-449f-9f36-2cd26f44674b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.369, 0.914, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1565", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_b29cd238-ad53-4b4d-b6cc-8a139d724bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Kayaks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.099, 0.668, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1566", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_c0e76686-4c8a-44e1-8982-6cb008ef9a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.034, 0.407, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1567", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_19e8ffd0-e981-4b9e-8eac-a752dabc1b72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.226, 0.105, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1568", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_18adac7f-bb79-4f9d-85b4-e4ec43f9775c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.552, 0.571, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1569", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_7ad31fb5-8326-41d3-b6d1-d7de45193fc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.524, 0.037, 0.574] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1570", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_07860790-53f1-4130-90cc-6a68c134f319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.009, 0.144, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1571", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_2c3caf3a-dd47-45ad-9bb0-4b63ea700dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.193, 0.587, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1572", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_dd2c3bba-7534-49fa-9567-2db678f5486e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.479, 0.147, 0.501] in the image\nAnd my action is Action: TYPE\nValue: 60"}]}, {"id": "mind2web_1573", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_ebc2b67a-b88f-4ff5-8a9a-3b93c778a404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 1234567890"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.244, 0.647, 0.271] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_1574", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_f66238f2-ef35-41ed-bd5a-61140b435c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.279, 0.25, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1575", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_2471b465-ed09-45d5-9c2b-ecf0efc16f91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK\n[p] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.255, 0.932, 0.298] in the image\nAnd my action is Action: TYPE\nValue: Black Ops"}]}, {"id": "mind2web_1576", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_32f9fa3a-cc74-4d29-8347-ec82ea2f97f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\n[button] change store -> CLICK\n[textbox] Search by Zip Code, City, State, or Landmark -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 0.177, 0.637, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1577", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_cd059490-19cd-4a25-9017-ecd728b2b58c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.218, 0.086, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1578", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_cceccde0-e4da-420c-a4c0-3dc9ef3191a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.203, 0.89, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1579", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_587cf410-7484-4014-a232-ebe323189d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.119, 0.139, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1580", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_b6b56f9c-77f1-40ed-bfef-d708917927c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.225, 0.473, 0.256] in the image\nAnd my action is Action: TYPE\nValue: 25000"}]}, {"id": "mind2web_1581", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_61dfd7e4-d733-4b03-a0bd-a80a9821c4a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.217, 0.481, 0.246] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_1582", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_0bf9dab8-18e1-471d-a458-72708badf771.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK\n[link] The Weeknd -> CLICK\n[button] Love this track -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.29, 0.104, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1583", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_30b98e40-d57a-4744-bfdb-660a9dfef288.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.151, 0.95, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1584", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_8c16969c-e931-4482-9e62-dc9ac32fe338.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.049, 0.455, 0.097] in the image\nAnd my action is Action: SELECT\nValue: Tue, Mar 28"}]}, {"id": "mind2web_1585", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_7581d740-729b-401a-9625-340b89af3dfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.366, 0.245, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1586", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_197af70a-27ca-4bb0-bbfd-8374dcbca041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.219, 0.389, 0.25] in the image\nAnd my action is Action: TYPE\nValue: Abbotsford"}]}, {"id": "mind2web_1587", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_2ac7ed26-b119-4a0a-bcdb-a83f63b211f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.578, 0.566, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1588", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_ea5b4be4-62c9-4afd-8e22-69ce9b7b0102.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.559, 0.196, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1589", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_bfa33329-4c33-4284-bdd5-ffae4b862d8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.387, 0.577, 0.405] in the image\nAnd my action is Action: SELECT\nValue: 1995"}]}, {"id": "mind2web_1590", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_a6f4f6b6-0ea3-435f-95bb-55fd74917bad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.055, 0.036, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1591", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_4d44da5c-7602-419f-a8d1-e48ccbe0ccb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.009, 0.613, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1592", "image": {"bytes": "", "path": "./images/928ec908-ea23-42a4-8b13-3ca6f0721ead_1287a730-f1cc-4046-9b54-e1aa12b6d33f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter search results for guitar tabs to only show songs with a difficulty rating of \"Beginner\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.062, 0.176, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1593", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_f1699e8f-d02e-4667-92f0-13061818f3c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.024] in the image\nAnd my action is Action: TYPE\nValue: dog food"}]}, {"id": "mind2web_1594", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_8a8aa71c-c8b4-4fb8-85d8-c47a3787306b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.069, 0.882, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1595", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_7fd7c256-6857-49f5-bd5a-c761fb99587b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.126, 0.659, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1596", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_ed48cb9a-ee51-4e80-9a71-bdd03240fef1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.206, 0.493, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1597", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_43f973b3-95d3-440f-8ecd-60b4b10a1d46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Athens -> CLICK\n[button] Go! -> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.185, 0.616, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1598", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_b655d9a9-a403-46c8-8b35-5686ceb8c895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] When? 
-> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.343, 0.772, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1599", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_9754a5ea-7a5e-4822-8c16-3049b3ec50ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.243, 0.328, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1600", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c778de5-a846-4249-9be7-49bf4badb86f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] Depart , required. -> TYPE: 04/23/2023\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.238, 0.357, 0.26] in the image\nAnd my action is Action: SELECT\nValue: Total travel time"}]}, {"id": "mind2web_1601", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_cfdab859-8ea1-4145-a761-ca9ffa100107.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.03, 0.274, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1602", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_cb293186-4d7c-4e50-96c2-2f81fc673290.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.286, 0.523, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1603", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_caa63937-8a3f-4ea6-8013-fb602b62e01c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.198, 0.212, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1604", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_945a12bb-d7e9-4fca-b017-2f102026def7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: Ballet\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.18, 0.569, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1605", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_64384e4d-3b63-4313-bc53-479890efd517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.036, 0.464, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1606", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_7109924d-4f35-4dd3-a5ec-af0b66f8cc53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543\n[input] -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.068, 0.851, 0.092] in the image\nAnd my action is Action: TYPE\nValue: Green"}]}, {"id": "mind2web_1607", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_08743486-379f-4213-a796-2f2ec65df153.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.212, 0.721, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1608", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_f365d762-a1b7-4bfc-be6d-29c97073326c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.195, 0.562, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1609", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_a7f6b888-5de1-4223-9684-6cb8f17c2402.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.102, 0.468, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1610", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_e184447e-7f19-40e8-82f0-58a6173878f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.496, 0.643, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1611", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_6b716ee5-9df3-429d-86ec-a8e0146a5a2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.203, 0.408, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1612", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_2169ef0b-2186-4604-8416-3775702ad018.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK\n[textbox] Order number EXAMPLES: ECEA12345, 01234567 -> TYPE: 456481897\n[textbox] Email or Phone Number (associated with Order) -> TYPE: 898-448-6474"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.913, 0.241, 0.99, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1613", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_7cf1ed05-1957-4d82-955e-7b21de6a6ff1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art\n[input] -> CLICK\n[link] Wall Art -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.46, 0.168, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1614", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c1d3ecd1-6112-4505-a707-fd619bd9f991.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.193, 0.743, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1615", "image": {"bytes": "", "path": "./images/6f1fe14d-543a-43c6-964a-0c74f6d86091_d0d43d10-b53a-4fe7-9b1a-bc20816f7d60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me movies produced by Aaron Horvath.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.005, 0.613, 0.017] in the image\nAnd my action is Action: TYPE\nValue: Aaron Horvath"}]}, {"id": "mind2web_1616", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_59c4d38b-5848-4f9e-8057-87bda7630fe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK\n[generic] Brand -> CLICK\n[generic] \ue914 The Ritz-Carlton (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.285, 0.96, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1617", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_734c5ed7-2c72-4f8d-87d1-95043346307f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.227, 0.068, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1618", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_fa1a703b-4bed-4c72-9d39-92f378ff4b48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.314, 0.459, 0.337] in the image\nAnd my action is Action: SELECT\nValue: 48 months"}]}, {"id": "mind2web_1619", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2dfc1364-9827-47be-8fe1-5b4000462ec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.339, 0.409, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1620", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_7988776b-0e2f-438f-b2d6-b789efb59236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.034, 0.931, 0.054] in the image\nAnd my action is Action: TYPE\nValue: Mikal Bridges"}]}, {"id": "mind2web_1621", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_47aa51fc-0ea0-440e-ad0d-851fd1bd1f90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.205, 0.807, 0.229] in the image\nAnd my action is Action: TYPE\nValue: North Station"}]}, {"id": "mind2web_1622", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_19c29ea7-ab93-4ced-aba2-5af7c9b162c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[generic] Click to add item \"Tuscany\u00ae Baytown Drop-In 33\" St... -> CLICK\n[generic] Click to add item \"Dayton Drop-In 25\" Stainless St... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.419, 0.958, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1623", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_06d3130e-be04-448a-a863-e5f760296504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.339, 0.995, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1624", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_8d50111d-41cf-4ccc-ba45-96eaab8eff04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 1.083, 0.758, 1.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1625", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_24495f4c-e52f-45dc-a2db-85227476df1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75\n[span] mm/dd/yyyy-mm/dd/yyyy -> CLICK\n[Abbr] May 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.393, 0.312, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1626", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_360c51fc-14b3-43ec-a013-8485a168a0f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Rail -> CLICK\n[label] Express Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.386, 0.848, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1627", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa29a6f8-0eb6-4810-a0d2-c46095e1eb0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.459, 0.47, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1628", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c0c57aa1-6255-44f0-a853-a8199d85778b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.147, 0.773, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1629", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_c3adce54-837b-4bea-880c-d8500152c67d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.206, 0.206, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1630", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_571a8a34-2ad2-41e5-bca8-b8f77ab01ab7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.451, 0.481, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1631", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_e4c84777-b378-44df-8696-64999d1c0000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.597, 0.13, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1632", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_8126292b-9121-4097-ae47-90374a2d66b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.264, 0.881, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1633", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_5506a376-2a29-4df0-bf29-d43bc2bd831c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.452, 0.953, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1634", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_24d96aba-20ef-4923-b4ce-41d35ddd7a45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.508, 0.84, 0.555] in the image\nAnd my action is Action: SELECT\nValue: 22"}]}, {"id": "mind2web_1635", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_33d871a9-cab5-4efa-a4b0-d1fed5245166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK\n[gridcell] Mon May 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.313, 0.556, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1636", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_a04c978f-a418-4035-9e7e-24eccfb178df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.032, 0.74, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1637", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_d40f1233-8494-41b8-81a7-06a62b0e1d9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.27, 0.543, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1638", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ee422c7b-5f11-4a16-9245-1fe1fd5e4e3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.092, 0.695, 0.105] in the image\nAnd my action is Action: TYPE\nValue: Love"}]}, {"id": "mind2web_1639", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_879e4979-0951-4fc9-a7f2-10d0324f5524.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.085, 0.773, 0.098] in the image\nAnd my action is Action: TYPE\nValue: 99201"}]}, {"id": "mind2web_1640", "image": {"bytes": "", "path": "./images/37564222-bb58-4a55-b47b-e9ffbbc1d160_53c60b2d-adf4-4d28-b1d2-bf611ed7b011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the results of the most recent NFL games.\nPrevious actions:\n[link] NFL . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.062, 0.21, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1641", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_1b53f68e-584e-406b-89f1-9ebc42ccc465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.248, 0.359, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1642", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ff2b5ac7-e294-4f9b-afef-8dbb37c61efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.005, 0.219, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1643", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_b92ebe23-bc7d-4bad-a928-aa3ef23ca849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.054, 0.546, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1644", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_cad83f0b-1baf-461f-92b1-b353a804f39c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.168, 0.677, 0.197] in the image\nAnd my action is Action: TYPE\nValue: Deathloop"}]}, {"id": "mind2web_1645", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_d36bbe70-741b-4a62-93a4-1b16f4790520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Where to buy a MetroCard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.409, 0.742, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1646", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_f326b843-6523-487d-8fce-82e72e9a0bd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.245, 0.377, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1647", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3203402a-4b9e-4b1a-8ddb-22aad143f626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Sun, Jun 4, 2023 -> CLICK\n[div] Start Date -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.298, 0.803, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1648", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_02f85c6e-84ed-42a2-8d2e-1a23f5ac3528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.444, 0.645, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1649", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_18aac891-d2cc-4fd1-bd1a-04b3b0a349af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.209, 0.715, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1650", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f42e267a-d410-43e3-986b-17397fa958cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.663, 0.284, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1651", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_7e25073e-b49b-49fb-aa04-c4eb651386f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.174, 0.734, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1652", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_5d24be92-5a3e-4d6f-949d-d4aec8a374a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.144, 0.375, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1653", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_d42eee95-95cc-482d-99f0-3f087df1b275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James\n[textbox] Message Line 1 -> TYPE: Happy Christmas\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.616, 0.9, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1654", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_4559c623-3668-4d7b-8d9b-f91e46c95435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.337, 0.375, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1655", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_bce1747c-7034-48d8-8257-0157b7e1d6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago\n[option] Chicago, IL -> CLICK\n[button] See next Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.223, 0.965, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1656", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_775290f1-45da-49b4-b454-0c1739b55504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[button] add Bottomless Bubbles to the cart with price $9.5... 
-> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe\n[textbox] last-name -> TYPE: Bloggs\n[textbox] booking-number -> TYPE: 101010"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.582, 0.23, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1657", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_eb17dc66-943c-4b2d-8533-f60580dd669d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK\n[gridcell] 17 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.288, 0.922, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1658", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_b33a580f-0820-45bb-8bf0-deaf9de822f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.287, 0.5, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1659", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_176f95c2-fb04-44dd-a18b-50de8dd10786.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK\n[span] Single Pack -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.136, 0.192, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1660", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_235ebfe3-a9e9-4f3e-8629-731eeda9bafc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK\n[textbox] Destination -> TYPE: New York\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.232, 0.639, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1661", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_42312a4c-561a-4fd6-9018-313706b8acf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! 
-> CLICK\n[link] Rides & Experiences \uf078 -> CLICK\n[link] Shops & Gifts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.204, 0.248, 0.218] in the image\nAnd my action is Action: SELECT\nValue: Gifts"}]}, {"id": "mind2web_1662", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_97e0fff2-f03e-45eb-9263-2ee6bf94bac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.345, 0.153, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1663", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_5ed2ca25-c9ae-4888-a3cd-da8c166130fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.316, 0.384, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1664", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_ebceb903-ecd1-4993-9962-4f21022d163f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com\n[textbox] *Phone Number -> TYPE: 234567890\n[label] I am NOT a Travel Advisor. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.131, 0.544, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1665", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_76b15b20-c42e-40bd-8e7e-d686c716d096.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.74, 0.482, 0.779] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1666", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_95adf1db-0249-4c8d-aed5-32e5cd9b98da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller\n[input] -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.329, 0.123, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1667", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_2de427ce-7c9f-44c9-b4b7-65e4f697624f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.349, 0.249, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1668", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_09fdd043-f803-4750-933d-aee5e5291cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.508, 0.233, 0.544] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1669", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_96fadcb9-f9f6-4d1d-b696-4208d5b98b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK\n[img] Sponsored Ad - Google Play gift code - give the gi... -> CLICK\n[button] $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.321, 0.654, 0.336] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_1670", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a9bbd8b9-6372-4ec8-823b-a7a75b04cd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK\n[radio] Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.512, 0.096, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1671", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_df246174-70ac-41c1-ba3c-7f741eb5afda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.084, 0.653, 0.108] in the image\nAnd my action is Action: SELECT\nValue: \ud83c\uddec\ud83c\udde7 English (UK)"}]}, {"id": "mind2web_1672", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_464ccc80-cbfc-4c86-a72e-f4a240a53743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.566, 0.489, 0.59] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1673", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_05bac10b-9c88-4c85-b380-2d89170b882d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.326, 0.7, 0.361] in the image\nAnd my action is Action: SELECT\nValue: 4"}]}, {"id": "mind2web_1674", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_2657d87f-4ee4-41cd-8272-12113073ca0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.12, 0.662, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1675", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_7037c6bc-63db-4a5d-93fe-fe2a87738c8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\n[a] City Pages -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.39, 0.858, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1676", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_51d326c2-eb8f-4c5d-b4b9-95716d9a7618.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba\n[button] SEARCH -> CLICK\n[link] Chords -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.19, 0.971, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1677", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_37fa00e2-64aa-432d-ae02-bc716b3c0726.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.387, 0.205, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1678", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_683fa50a-4b42-4881-8f37-0352c39ce025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.407, 0.284, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1679", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_40e6c0f2-c0aa-4052-bf7f-47e27f5de990.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.132, 0.574, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1680", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_d12db8d3-672e-48e8-8d6f-b9adc6ffa5f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.192, 0.291, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1681", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_fad7b35a-6f9a-4294-8470-74b9cc85bd65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.243, 0.356, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1682", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_8fac423b-2ae6-402f-85b7-48b356e7f5ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK\n[p] Remove -> CLICK\n[button] 4K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.319, 0.703, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1683", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_b4680bd7-becf-4477-b09b-b3e9351c8e25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.061, 0.471, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1684", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_82ce6c3a-9087-41e3-9900-56d7d8798099.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.171, 0.737, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1685", "image": {"bytes": "", "path": "./images/6d338fef-6d40-4f08-a045-861ddbc3d9f4_c64fb6ac-9525-46c7-bb5c-a78cf71e4fc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse glamping locations in Northern California.\nPrevious actions:\n[link] WAYS TO STAY \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.132, 0.266, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1686", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_37982fe3-0a00-4cb0-81bd-93641d095722.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.265, 0.986, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1687", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_f24491dc-80a1-4824-82cc-67c11157db08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.391, 0.474, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1688", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_dc1847f7-919b-4a2f-b778-2ee33edacc46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.057, 0.812, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1689", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_37f20ac7-f9b8-45df-afe5-4f8d184cd100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1690", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_38274c62-d229-43da-a57a-32470873c88e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK\n[link] Canada -> CLICK\n[link] King of Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.142, 0.97, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1691", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2ab926e5-d341-46ab-ac5c-48d1001bf00a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1692", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_9cd4c1dd-80ee-402e-992a-70c4e072e0ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.124, 0.948, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1693", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_6ad53972-ccb3-4e09-a210-e20efff708a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.301, 0.459, 0.332] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_1694", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_3ee042de-e542-4cdf-b2c0-0f2c3a4f74f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[input] -> CLICK\n[option] WA -> CLICK\n[button] Products -> CLICK\n[label] Kids -> CLICK\n[label] Maternity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.666, 0.412, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1695", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_8521d188-44bf-4585-9ddd-10af35e11bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.076, 0.672, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1696", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f2a55dd0-a78b-43a9-8611-d22a9f6510bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch\n[button] smartwatches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.528, 0.032, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1697", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d79a8219-43dc-4b34-b8e4-bdc43b6678b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.063, 0.777, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1698", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_7d59caa8-226f-4fe2-986b-17eb4b9ffcee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: london\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.068, 0.327, 0.084] in the image\nAnd my action is Action: TYPE\nValue: sheffield"}]}, {"id": "mind2web_1699", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_911b9641-1ba1-4aa5-a6bd-7d1a609dd663.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.174, 0.805, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1700", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_b084de8c-0fa0-44f0-853f-12afedc35be6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.29, 0.248, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1701", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_38386a3c-7b6f-4dc1-9977-2bdfb13ca2c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.064, 0.746, 0.092] in the image\nAnd my action is Action: TYPE\nValue: recipe"}]}, {"id": "mind2web_1702", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_6cca7b3f-a1f9-42b7-b468-dc7d0dfb93a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.09, 0.16, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1703", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_c09834c9-a9db-4eea-83ed-69f3feb73903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.438, 0.36, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1704", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_c793da3f-2031-4dfc-8684-78418b702dd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.284, 0.789, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1705", "image": {"bytes": "", "path": "./images/40fbda9d-22c5-4aab-9798-3db50d981c5c_c12adadc-f6c6-4a3f-8969-6badce55661d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to answer a question in the home improvement section.\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.102, 0.595, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1706", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_17c1ec49-033a-41e2-a6cd-101bfe603185.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK\n[label] Rail -> CLICK\n[button] Done button - Press enter key to submit travel pre... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.383, 0.359, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1707", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_50ddc6d5-f2d7-497f-b281-bf6a1aa2061a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.247, 0.383, 0.267] in the image\nAnd my action is Action: TYPE\nValue: Conductor"}]}, {"id": "mind2web_1708", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_1d12dd7b-e729-489b-a3d9-6316947514ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.57, 0.972, 0.611] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1709", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_bc7f8665-64d1-48f7-97df-63fdf82ac826.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307\n[button] 105307 -> CLICK\n[combobox] Change quantity -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.417, 0.351, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1710", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_4497cc28-31de-4410-b209-540e572646c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.336, 0.09, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1711", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_7f3f485c-961a-45b8-bec6-288eedb4e5c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] Chicago -> CLICK\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.437, 0.542, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1712", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_786a63d8-2537-40cc-85ce-2484ed87a3ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.265, 0.047, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1713", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_032de1a2-3b36-4cbb-80c4-94c0c2882d0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Most Played -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.164, 0.548, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1714", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_62440a28-36b1-4101-bba1-55fc81c56f90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.069, 0.492, 0.099] in the image\nAnd my action is Action: TYPE\nValue: Hotels"}]}, {"id": "mind2web_1715", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_32b5be81-0be2-4247-8419-7817ed9927c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.184, 0.245, 0.203] in the image\nAnd my action is Action: TYPE\nValue: LAS VEGAS"}]}, {"id": "mind2web_1716", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_f32b6c7d-1aad-45f6-b201-b6f78fccc014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.203, 0.694, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1717", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_758f0d99-942a-4dc5-93c4-acefb1418d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14\n[button] Order Now -> CLICK\n[img] -> CLICK\n[button] Select -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.261, 0.703, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1718", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_2285e9d0-68f5-4691-895a-faf4f9e1ceca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.191, 0.463, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1719", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_01945c7c-6cea-4473-81a8-af3672d0c114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.005, 0.492, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1720", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_e23192da-8d2f-4759-b9c8-79da922f98bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK\n[checkbox] Superhero Sci Fi (745) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.349, 0.331, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1721", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_7690935f-c901-40fe-8b8c-afd20a6e4a91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK\n[link] Teams \ue00d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.257, 0.707, 0.274] in the image\nAnd my action is Action: SELECT\nValue: Spanish LaLiga"}]}, {"id": "mind2web_1722", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_c5db25df-3e40-41fe-8667-1e5ba8f58c02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.148, 0.375, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1723", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_085c7e1e-20b1-4c50-ba2a-8e9088dfd3e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.039, 0.621, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1724", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_fd00b193-5c69-47ae-89a7-19293dbe9c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.017, 0.77, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1725", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_1896e25f-674e-407b-bb72-a02a44c625b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.027, 0.577, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1726", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_a1f00027-cf54-420a-b375-71b179d29a4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK\n[link] sit on a parent\u2019s lap -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.609, 0.61, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1727", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_109f902d-9671-436b-9870-8f7358032809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.103, 0.287, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1728", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_e186f90a-bada-4de9-9201-38bce05d6f24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.303, 0.599, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1729", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_59e52dac-4d79-4f8c-96c8-b10b27294851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 
123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith\n[select] Confirmation Code -> SELECT: Ticket Number\n[textbox] ticket number maxlimit is 13 -> TYPE: 123456780"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.32, 0.281, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1730", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_635ce156-2cfa-433d-9e4a-a4b6002519cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[span] Walgreens -> CLICK\n[textbox] Add a title (Required) -> TYPE: Walgreens\n[img] A person holding a tomato and a grocery bag with a... -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.669, 0.193, 0.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1731", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_1e9d301f-ab8a-4ee1-8441-3a05220caf6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.47, 0.339, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1732", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_5d697126-f42c-4bda-8316-05bd6ab4e3a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.257, 0.253, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1733", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_30644e2f-e07b-499c-8a69-269b8c6dd9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.57, 0.684, 0.577] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1734", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_113b23fe-b2b9-44f4-9d94-55f2490d9e41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.288, 0.466, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1735", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_60a95fcf-316e-4431-94e7-50a38e6a421b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.143, 0.925, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1736", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_4c99e677-3a4a-428f-818f-5505d1841eca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.139, 0.053, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1737", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_54620a46-795e-4dd5-8616-41aba7dfed58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.421, 0.345, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1738", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_136a2a2c-8b5b-4ac1-ac9d-5b30cc7d2840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.237, 0.616, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1739", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_8427717e-96a8-4a13-b271-cb4eefd926be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... -> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.226, 0.991, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1740", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d9021cfc-99a8-49c8-8f1e-d3a6a3dcbddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.299, 0.566, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1741", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8338e3ee-2170-4b88-b346-742f10b82e06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.683, 0.18, 0.734, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1742", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_c5d7f50f-9728-4cb1-b11e-e97ff0e67470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.566, 0.399, 0.709, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1743", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_f42d0082-b3db-4ab3-bc96-0f5c51523fbf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.174, 0.244, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1744", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_aa02e15b-4a03-4834-9c7b-426c76d2a7d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.324, 0.772, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1745", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_a0b8ce6d-f627-464e-8c71-a2e196fe4999.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.084, 0.605, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1746", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_7f944d31-195e-4421-9644-93d4aadde6f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.415, 0.096, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1747", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3129d72d-487b-4db5-b9f5-e5108f9905c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.288, 0.943, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1748", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_c397e50d-70f8-4294-bf79-ee3a2d1d1385.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK\n[menuitem] $99 or less -> CLICK\n[path] -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.369, 0.045, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1749", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_f9b331ee-30d0-452d-8612-6799a1b53c65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.211, 0.148, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1750", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_719999a6-90a6-40d5-8b0c-067215172e55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.431, 0.471, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1751", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_fe049523-2d6d-4d2e-9721-982583f3b2bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.168, 0.495, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1752", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_23f652b4-d726-4302-9ed4-c78747639b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: adele\n[option] Adele -> CLICK\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.209, 0.792, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1753", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_94f9bf73-f8d5-45fc-9fe8-8745e3364c2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.164, 0.303, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1754", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_33c0189f-b6a7-4bae-9005-79334f91871a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.125, 0.528, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1755", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_ec2be064-6f50-458b-8f90-5473118a60a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.339, 0.375, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1756", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_a78c000e-8c5f-40cb-beca-5a3daeb439c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.063, 0.181, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1757", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7065838f-d6b7-48b1-b673-1fdb72ebf959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. 
-> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.315, 0.446, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1758", "image": {"bytes": "", "path": "./images/d4f9c67f-00c5-41ed-bde1-c704b92647f7_be041995-ae1b-4f8a-85ed-f83b8b1ba907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare Pro Plans with other plans available.\nPrevious actions:\n[link] Try Next Pro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.232, 0.336, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1759", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_ebcfa163-d925-44c7-8cf2-b73382218e73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.114, 0.3, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1760", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_c9ed5c49-2af1-457d-851d-2214eea40c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.114, 0.317, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1761", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_98fe0126-b4a4-4fb4-af52-ca93b1a10f9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.7, 0.96, 0.735] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1762", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_1b134d74-a104-4353-a54e-5a420f0822da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.378, 0.645, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1763", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_5b0f6466-edf2-454c-bfdf-d1c49da07f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[button] Athens -> CLICK\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.12, 0.5, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1764", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_0a667a4f-86b7-4ec6-b915-9af08d700aca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.016, 0.45, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1765", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_507a4143-8f0c-49cc-90a8-ae3a780eea69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.014, 0.157, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1766", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_74542878-df35-4595-8762-c4c3e951d6aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.366, 0.249, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1767", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_4374b5ff-1f84-468b-88d1-cbf28ecd40b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[label] Most popular -> CLICK\n[span] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.643, 0.34, 0.741, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1768", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_c7fbea4e-d582-45ed-82b3-2f01cedfc20b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.148, 0.841, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1769", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e3aea0a2-63d1-40ad-9b55-ca12b927d7c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.155, 0.947, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1770", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_c10ef165-10fb-42e0-858e-713888d54f96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK\n[menuitem] Women's -> CLICK\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.197, 0.984, 0.216] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_1771", "image": {"bytes": "", "path": "./images/3b74d9cc-ba1b-441d-a1a2-a05cea62a800_42c5eaec-812c-49d9-b9c5-5c4af9c22f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending daily deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.618, 0.05, 0.7, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1772", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f49f5305-0a5f-46b1-af16-a1fa43ae89b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.292, 0.152, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1773", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_244fae6f-f044-41cb-b2e9-28ae4d806164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.273, 0.256, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1774", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_a28f8957-0d43-4b38-ae85-e2342c1e9840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[option] Washington District of Columbia,\u00a0United States -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.677, 0.496, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1775", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_00f9659f-e69c-42c9-92cb-fb3779a46c05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.239, 0.709, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1776", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ddea7a9c-3acb-4198-94fd-eb659f813bf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3\n[textbox] Zip Code -> TYPE: 10001\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.16, 0.429, 0.179] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_1777", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_81d303ca-5152-4c0e-bd51-bb508e5b8b61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first\n[div] Request Info -> CLICK\n[span] Send -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.337, 0.682, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1778", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_bb8274d2-dd40-4967-9c7d-b3b4bbdc86c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.083, 0.914, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1779", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_47d12b84-360d-4519-b16c-db6972664cf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.032, 0.553, 0.058] in the image\nAnd my action is Action: TYPE\nValue: easter home decor"}]}, {"id": "mind2web_1780", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_d2d729b6-3704-4165-b841-843500524934.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[p] Women's Sonoma Goods For Life\u00ae All Over Stitch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.562, 0.931, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1781", "image": {"bytes": "", "path": "./images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_aac4d2a7-211c-44bd-9b2f-c1652193926f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the score of the 2020 Super Bowl.\nPrevious actions:\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.187, 0.771, 0.209] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_1782", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_4eb9f199-5e4e-46b3-9f15-5b65eab3ad0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[input] -> CLICK\n[option] Socks -> CLICK\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.282, 0.256, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1783", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7478efe0-d084-4691-b17c-4eb86f32538c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.412, 0.295, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1784", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_c49949a8-e0f9-4a34-b795-342e7126a979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.107, 0.266, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1785", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_3b5d10c3-ef68-4cc2-ad78-d1a5886fbfec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK\n[link] Hammocks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.396, 0.175, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1786", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_c98344a0-ee8d-469a-b593-ec5f1552321d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.13, 0.236, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1787", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_55539bcd-3c78-406e-ba78-4bc08281ac01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... 
-> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.168, 0.969, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1788", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_5d754fb7-f2c9-4ad0-a58b-577b5a88701b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.466, 0.442, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1789", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_ca2747bd-f638-48d5-922a-3a3d48df068e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.218, 0.597, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1790", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_93b118fc-5d32-48a8-b85c-703862f58792.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK\n[span] Jun 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.244, 0.899, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1791", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_6d22158e-f615-4bae-b167-22f650edca52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.394, 0.713, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1792", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_48fb0147-627c-4f08-beb2-1466c609b79c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.094, 0.925, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1793", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_20241afc-259f-45ba-9e50-080f1d830f99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK\n[link] 10 (131) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.248, 0.986, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1794", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_8bd8ad11-efd2-4c3b-b2a5-597daee6be65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.187, 0.133, 0.204] in the image\nAnd my action is Action: SELECT\nValue: 08"}]}, {"id": "mind2web_1795", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_b7042017-f4ae-4879-a6b1-8e464d022490.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.152, 0.785, 0.189] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_1796", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_bffa44f6-a3fb-46d7-acc7-61240391f67f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.065, 0.282, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1797", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_21480087-2574-48ae-be40-92f1dafdf19c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.483, 0.697, 0.523, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1798", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_d1772f3d-086d-4f30-b37f-eed1de2786aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK\n[span] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.175, 0.254, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1799", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_e4e44d28-113f-4bfb-b728-767731f4cab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City\n[div] New York City, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.01, 0.781, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1800", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_b168819e-8122-471e-a359-ee4ed4099355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER\n[link] POINTS SHOP -> CLICK\n[link] Avatar -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.507, 0.589, 0.629, 0.655] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1801", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_fae2049c-2694-4893-b305-169ac217ea7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK\n[textbox] First Name * (required) -> TYPE: Joe\n[textbox] Surname * (required) -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.179, 0.573, 0.198] in the image\nAnd my action is Action: TYPE\nValue: joe@bloggs.com"}]}, {"id": "mind2web_1802", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_c6255d44-9f9a-4dc4-9815-f52f35569c56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.229, 0.408, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1803", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_40c60443-a21c-4b2f-90a7-67bc59037f55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.247, 0.079, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1804", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_2c7cb3e8-b290-44ff-865e-30eb46c48a18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> CLICK\n[spinbutton] Flight Number (Required) -> TYPE: DL145"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.894, 0.058, 0.934, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1805", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_c80dc072-b45e-4ca3-bc8a-42454e1554a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.243, 0.11, 0.267] in the image\nAnd my action is Action: SELECT\nValue: 2017"}]}, {"id": "mind2web_1806", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_eaaaf553-37a7-488f-95ac-adf4cde55890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.096, 0.181, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1807", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a52e515e-a366-4168-9ee0-8206421aeb6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.092, 0.327, 0.121] in the image\nAnd my action is Action: TYPE\nValue: Manchester"}]}, {"id": "mind2web_1808", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_8df36e50-14d2-43c9-85af-0e2c507c74c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.325, 0.492, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1809", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_a83d190e-6580-4124-aadb-f55e49050396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York\n[a] NYC - New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.197, 0.875, 0.235] in the image\nAnd my action is Action: SELECT\nValue: Friday, April 7"}]}, {"id": "mind2web_1810", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_06076f5a-e49d-4f9d-aeb0-2947192e0d54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.101, 0.414, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1811", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_652f2f08-6660-439d-af3c-a7fc41fb8da3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Done -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.231, 0.263, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1812", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_3e95ac82-a5a0-4db0-87bb-0e446a69412e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\n[combobox] Search products and services -> TYPE: rogaine\n[img] Men's Rogaine Extra Strength 5% Minoxidil Solution -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.625, 1.0, 0.651] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1813", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_578adebd-03d2-4cf9-a508-15eb94946605.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK\n[link] 1990s 3,183 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.188, 0.97, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1814", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_88261830-aa18-4e93-bf12-4fef640e05d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK\n[combobox] Select Region Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.536, 0.488, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1815", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_77dc3641-bde1-4ddf-acbe-e7a014cf2d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.007, 0.561, 0.029] in the image\nAnd my action is Action: TYPE\nValue: Chill"}]}, {"id": "mind2web_1816", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_ee8707f3-64d9-4ca3-b9d7-12a71897b462.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.656, 0.685, 0.68] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1817", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_af5953e2-01e4-4100-bfde-3a72b66535b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.102, 0.145, 0.152] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_1818", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_0f444a22-9504-4eb4-a64c-e3142da53071.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.015, 0.335, 0.023] in the image\nAnd my action is Action: TYPE\nValue: Hot Dogs"}]}, {"id": "mind2web_1819", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_26ad118a-4b0e-4fcf-aa60-59d470e2ef31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Miami, FL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.432, 0.871, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1820", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_8dd6021e-c277-488e-bfb9-2e65698d85bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK\n[span] 2 guests -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.608, 0.525, 0.654] in the image\nAnd my action is Action: SELECT\nValue: 1 guest"}]}, {"id": "mind2web_1821", "image": {"bytes": "", "path": "./images/b3a28e48-3912-4b0e-b3a9-d359da13864d_456e8f64-967d-4497-8b2c-7c1075f87817.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL passing touchdown season stats.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.062, 0.286, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1822", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_130056b9-3612-4542-a3f6-8db724d54967.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.014, 0.663, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1823", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_7aaf77ba-4f36-41e2-9f6f-737a7cdb55b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.592, 0.307, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1824", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_9ddc5879-cba8-40f9-bb74-62073d3e1148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.503, 0.309, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1825", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_f7f24d84-4a4a-4bad-9163-6010b47e39be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.247, 0.517, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1826", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_8bcbfa0a-acbf-4339-a568-b3e0b28f774f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.135, 0.577, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1827", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bb69521e-4cea-48cb-997b-5779793d1ce7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.294, 0.38, 0.325] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_1828", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_43a16107-29aa-42ef-b84e-d58837934892.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.354, 0.459, 0.384] in the image\nAnd my action is Action: SELECT\nValue: Fair (580-669 FICO\u00ae Score)"}]}, {"id": "mind2web_1829", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_80a3fcae-6109-4867-9a8c-89df2148fe40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.225, 0.673, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1830", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_1694e0bf-0021-422e-a914-aad55c47be68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.056, 0.273, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1831", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_3ed73142-d61d-49dc-b37b-f1ffdf6df747.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.185, 0.942, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1832", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_2e7cc505-4147-435a-9662-293d0880c84d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.005, 0.675, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1833", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_9d423e0d-390c-4608-bd51-2dd07d60bfca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[generic] Click to add item \"Tuscany\u00ae Baytown Drop-In 33\" St... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.333, 0.476, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1834", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_f104a722-6ce0-4b4b-ab2c-28c8653c333c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.166, 0.145, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1835", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_7a8793c6-ae97-498e-b9d4-5bc223860950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... 
-> CLICK\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.232, 0.619, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1836", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_f18d3105-6e8d-4c4a-b12b-a3a1351bdca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.136, 0.838, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1837", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_c240bc73-00e0-40a0-8a8d-283b016b4d66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK\n[link] Filters -> CLICK\n[checkbox] Wi-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.28, 0.357, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1838", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_9e4b272b-5b5f-4648-b670-b9f64de663fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.089, 0.687, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1839", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_11ca4f53-e4fc-45c4-b503-bd5af383ebe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.361, 0.181, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1840", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_497bba29-70fa-48b0-a11e-3c610e59cb1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: the weeknd"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.953, 0.0, 1.0, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1841", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_345899fd-7f18-4087-99fa-be98ef4a1cd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.202, 0.166, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1842", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_131228c6-f6dc-4cf1-8109-d54d5c4abe34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.196, 0.418, 0.245] in the image\nAnd my action is Action: TYPE\nValue: Hackney"}]}, {"id": "mind2web_1843", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_34cd7674-6ef6-4a36-a6d4-86d0ed4840ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.158, 0.463, 0.166] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_1844", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_6836bccf-f0bb-4f6c-86ce-a94e27dfccfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.338, 0.693, 0.358] in the image\nAnd my action is Action: TYPE\nValue: 04/19/2023"}]}, {"id": "mind2web_1845", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_139fd486-b530-460d-8d08-ab5188efe59e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.46, 0.382, 0.499] in the image\nAnd my action is Action: SELECT\nValue: Arizona"}]}, {"id": "mind2web_1846", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d3f9d0ab-c01c-4a80-a032-02c8fd2b4430.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.218, 0.425, 0.243] in the image\nAnd my action is Action: TYPE\nValue: MANILA"}]}, {"id": "mind2web_1847", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a9eaf474-7818-4f53-b92b-fc9157317806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. 
-> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.428, 0.595, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1848", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_cd326427-a6a5-468d-81d4-97d01903c790.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.014, 0.441, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1849", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_0d097d8e-5994-40d6-83eb-55dbafcc5a7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.038, 0.164, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1850", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_fc1303fa-b215-437a-b69d-1269e991988d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.319, 0.686, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1851", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_14959a6e-68e3-4fb9-a9de-1bae945670d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK\n[p] Choose 3-hour window -> CLICK\n[div] 10am - 1pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.566, 0.17, 0.596] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1852", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_c93d7f4e-f722-4b74-accf-af32bb9ba52e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.057, 0.43, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1853", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_ce3e58d6-d6f4-4341-8c53-2dd347ac8505.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 21 -> CLICK\n[button] April 17, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.153, 0.192, 0.183] in the image\nAnd my action is Action: SELECT\nValue: 2 Guests"}]}, {"id": "mind2web_1854", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_2fd5487b-aa31-4b3c-a230-36c025edc516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.334, 0.29, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1855", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_b11ff5db-7e2a-487f-846e-fcc2b67d1485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: red sox vs yankees\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.525, 0.93, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1856", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_ee5fabe8-604c-4450-a735-09accff76895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.257, 0.459, 0.279] in the image\nAnd my action is Action: SELECT\nValue: 100 km"}]}, {"id": "mind2web_1857", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8ee2d1d9-6fc9-46af-a6ff-482d1a1fa2ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.631, 0.942, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1858", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4ecddf71-7ddf-42d5-b7ba-8090b6c8ca7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: black stroller"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.026, 0.187, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1859", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_8cbee5da-e8dc-449d-8239-aad7bfa21b40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.053, 0.882, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1860", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_f7425150-b0a1-4b8d-b230-d614dcbb9168.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.201, 0.062, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1861", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_bde4fd17-cc02-4bf9-83bd-db5d49cca9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.304, 0.365, 0.32] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_1862", "image": {"bytes": "", "path": "./images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_bdc83bef-edce-4e40-8ec5-8613da4be602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the birth place of Ali Wong.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.109, 0.595, 0.121] in the image\nAnd my action is Action: TYPE\nValue: Ali Wong"}]}, {"id": "mind2web_1863", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_456d9ebb-0ba2-4003-aaa3-a020deb5f737.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.076, 0.745, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1864", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_e3eb0dee-a1dc-4c62-bcf9-b7c9f56c3113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.138, 0.11, 0.156] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_1865", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_0e9b3fb1-56a3-4609-98d6-fd91fb47d49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.065, 0.312, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1866", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_114bcbeb-20b3-4d5c-a261-9db6f51a713a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.182, 0.641, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1867", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_6e893988-a198-4aab-a94b-4180a72e8dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, Tours \uf078 -> CLICK\n[link] VIP Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.468, 0.788, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1868", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f82185ba-eff4-4e0d-b9f0-d14a1403c7f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.646, 0.111, 0.651, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1869", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_6fdf95e8-5479-42b1-b0cd-7c701cb370b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.206, 0.504, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1870", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_8a0c2511-b6dd-46eb-9591-5f52889652e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK\n[button] Deals -> CLICK\n[link] US Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.45, 0.618, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1871", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_e7992a9c-7b79-48a4-ac2a-30cb1dfb3e13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.024, 0.829, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1872", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_a87d15f4-9b84-4f7b-9e24-8cd9f9b7a6f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.352, 0.958, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1873", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_30baf113-0948-4a1c-a1da-ae5a3b030698.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.1, 0.14, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1874", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_038250de-f189-4f6b-9a09-14f89412c863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.283, 0.86, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1875", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_584ff31f-642d-4e32-a387-3b47a67f9725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.114, 0.084, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1876", "image": {"bytes": "", "path": "./images/3c49d9c8-c3e4-402d-9a66-c34232975aa0_b3736604-cbf4-4e59-aee9-d057d7ef7558.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue press releases for the year 2020\nPrevious actions:\n[link] Press RoomExternal Link should open in a new windo... -> CLICK\n[link] View All Releases \uedbe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.304, 0.244, 0.335] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_1877", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_9539b195-8f21-4470-aa1b-46904e797e75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.092, 0.548, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1878", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_195a4b8c-1c6d-41ff-bb22-eca585b4e44b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.564, 0.389, 0.584] in the image\nAnd my action is Action: SELECT\nValue: 3"}]}, {"id": "mind2web_1879", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cfe62f81-d404-4c83-af48-e2a2d50afc4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.562, 0.041, 0.571] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1880", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_a5a7935b-240e-460a-a742-723e9f435050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.01, 0.05, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1881", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_64430af6-f9da-4baa-9f55-d51ca0f50f7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.459, 0.536, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1882", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_0127f704-8546-4c72-806e-70ad7a2c3a07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.304, 0.365, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1883", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_f0f24934-782b-4b19-a80e-cae0dc3acafd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.333, 0.459, 0.374] in the image\nAnd my action is Action: TYPE\nValue: 250"}]}, {"id": "mind2web_1884", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_5c0fdc7d-84e2-401f-a3de-6e925f591bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana\n[link] Search for \u201cNirvana\u201d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.172, 0.417, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1885", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6ba32131-0de9-44d8-a22f-75c28eb37f80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.05, 0.321, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1886", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_e8c5af4e-b575-4093-9b8f-02ec489f76a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.205, 0.853, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1887", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_751493a4-4198-4f94-abf0-701f037f7e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.064, 0.871, 0.087] in the image\nAnd my action is Action: SELECT\nValue: Price low to high"}]}, {"id": "mind2web_1888", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_90311f8c-7889-459b-9739-5fe71a0f49cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.013, 0.232, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1889", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_eefa305d-41d8-4e3e-9105-c389709d90ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.19, 0.582, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1890", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_a2e91cde-5120-4851-a140-2dcd34d9e26e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.373, 0.256, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1891", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_34028620-ebde-4b2d-8709-4c162b03e46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK\n[link] Likely To Sell Out -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.294, 0.927, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1892", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_eb3902e3-de1c-4124-9b4b-23b2190d5e8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK\n[label] Private Tour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.145, 0.963, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1893", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d411b028-22bd-42d8-a4c2-ffb7d2c40d32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.194, 0.244, 0.206] in the image\nAnd my action is Action: TYPE\nValue: TEXAS CITY"}]}, {"id": "mind2web_1894", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_858f46ac-aa0f-44ff-8278-4b53cdae0c70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.111, 0.44, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1895", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_fa65f280-f9c8-4656-93fc-af91c10c364e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.3, 0.884, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1896", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_8e888704-08c3-4164-9b92-57ad8521fb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.048, 0.869, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1897", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_9518f246-371a-40eb-b20f-2c5c1083d0f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.122, 0.316, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1898", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_2dea02c5-cea0-4856-a1d3-8abb09bd43f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.03, 0.321, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1899", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_1fe1c066-5e3b-4124-8973-50ca217bed17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Audience score -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.276, 0.648, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1900", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_43231286-647f-4ce4-86e5-39ccda467b94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.794, 0.03, 0.832, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1901", "image": {"bytes": "", "path": "./images/f75e33a6-d7d6-4eea-9ac8-f47be0224114_ccbaad1c-72a2-47fa-9eed-220da3dc67ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the score of the 2020 Super Bowl.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.011, 0.151, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1902", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_60bbef2f-114c-4dc4-bbb4-3928f9225c62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.18, 0.931, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1903", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_f39fb52e-050b-44d5-997e-e214bf88693b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.377, 0.365, 0.399] in the image\nAnd my action is Action: TYPE\nValue: san francisco"}]}, {"id": "mind2web_1904", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_c816be58-23d2-467a-bab0-d03ad0e88d90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.198, 0.33, 0.233] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_1905", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_725e6ba0-21ea-43c8-b477-46717892546f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.157, 0.324, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1906", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0a6e420e-940c-499c-a0b2-5bcd58f42594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.628, 0.655, 0.645] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1907", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_e853407b-48cc-43e9-9872-9a927347af03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.002, 0.925, 0.015] in the image\nAnd my action is Action: TYPE\nValue: travel credit"}]}, {"id": "mind2web_1908", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_33c337b9-44b0-4f88-af43-acaaec73c2c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.011, 0.781, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1909", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_7ddd7a0d-971d-434d-9fe9-1dee38a402a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.14, 0.637, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1910", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_ef34dfdf-8e92-4a87-bead-5c134aa1fd8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.395, 0.362, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1911", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_84137f8b-2f70-4479-99db-8a8c3f1da091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.548, 0.196, 0.575] in the image\nAnd my action is Action: SELECT\nValue: Audio (376)"}]}, {"id": "mind2web_1912", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_9dc42eb4-30b5-4c98-8ae5-e1a1cca00859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.618, 0.375, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1913", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_46a54936-b04f-4a6b-8350-cc4259fe03d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: Panini Diamonds Kings Baseball cards"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.077, 0.228, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1914", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_dfeae7f8-eb3c-4d38-96e8-ddc4967e89d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.317, 0.331, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1915", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_1d82f2fc-e917-4ead-95cc-52fe5041676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Florida -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.337, 0.679, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1916", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_bbefae42-d680-4113-a45a-8319079ac7fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.004, 0.791, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1917", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_e29f648e-0dcd-4cb1-8bf0-dc33c40ffb98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK\n[textbox] When? 
-> CLICK\n[li] Summer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.011, 0.82, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1918", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_2d1b8584-a901-4e9a-b1d6-fd6e6df2291a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.19, 0.214, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1919", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_0549de40-9213-46db-9cef-488a057eae19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.33, 0.5, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1920", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_4e2e6234-ed23-40c6-a6fc-c82108cd2f49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.301, 0.5, 0.347] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_1921", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_649b764f-bbb7-4b14-a135-4ecdf1d73419.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.146, 0.377, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1922", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_aa3d9f1d-e6d8-4a6f-bb93-ae6037c428f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.208, 0.446, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1923", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_6f320723-118d-4b4d-b300-c9f924cf5926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... -> CLICK\n[link] MONTHLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1924", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_1d358c36-6333-4e3e-bb32-505bd9a44c2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.025, 0.668, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1925", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8951ad3c-1dc5-4117-a207-a89a61ef0655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.164, 0.481, 0.188] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_1926", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_b7ac89f0-fd43-4114-b900-87d7d0c36444.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.313, 0.174, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1927", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_f2a535a9-3a26-4aac-873a-ca97ed26b08e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK\n[textbox] Enter zip code or location. Please enter a valid l... -> TYPE: 90028\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.555, 0.891, 0.571] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1928", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_5b91bcfc-b54e-4802-bd4f-397bba7bf1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK\n[text] J10 -> CLICK\n[text] J9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.606, 0.845, 0.657, 0.915] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1929", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_098efde6-eb53-45cf-890d-7ea0024c1471.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.036, 0.441, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1930", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_2b8a3c38-07a7-4ef6-af36-a725dc25cc96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.236, 0.476, 0.262] in the image\nAnd my action is Action: TYPE\nValue: Monty"}]}, {"id": "mind2web_1931", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_ae94047b-798d-4a4c-a272-9afc85a85965.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK\n[combobox] Drop off time Selected 10:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.228, 0.48, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1932", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_bccd212d-8178-46a9-9e6c-adc13537d091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK\n[polygon] -> CLICK\n[heading] Mini Short-Sleeve T-Shirt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.219, 0.906, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1933", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_104bc12f-8c89-401a-9b45-17f03ab34fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.029, 0.74, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1934", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_d6343eab-e997-4934-8527-0d69f7db2bab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.194, 0.413, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1935", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_c196ba4d-236d-4ee2-8936-569abbd6f1bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.387, 0.249, 0.613, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1936", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_dbadff27-1043-4cf2-adb7-329d4aee6c5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Lowest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.677, 0.451, 0.728] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1937", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_f669df1f-2c14-404e-b43a-e6dbb96e757e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK\n[button] All Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.113, 0.393, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1938", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_501ce3db-36c4-4b7d-a7d3-392f4e797076.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\n[textbox] Search -> TYPE: George Clooney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.507, 0.704, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1939", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_7623cff3-2eb7-4a39-aadc-25e7d26866b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.191, 0.24, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1940", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d59c93cf-f6a3-40df-b51f-40934918fa67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.016, 0.39, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1941", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_2e506c3a-e3d2-4d40-8ad2-d345bedcb636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? 
-> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.442, 0.137, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1942", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_602daa70-9988-473f-9c95-cff02a656628.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1943", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_8ba82d19-87d2-49b2-889e-97dbe607f7d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Hawaii -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.442, 0.447, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1944", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_eec3cc44-2bca-4fe8-ac6e-df1f0467410d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[button] Done -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.225, 0.263, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1945", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c3286ff0-a564-437f-b3c5-4362d51d4a5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.07, 0.327, 0.092] in the image\nAnd my action is Action: TYPE\nValue: Leeds"}]}, {"id": "mind2web_1946", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_78f0467d-3283-4ead-972f-8e6d64bc3eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.388, 0.938, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1947", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_a039d9b0-cf8a-4049-b01f-20740f97e6d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.37, 0.783, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1948", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_06490f6f-6835-4206-9d1f-35429e950324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.035, 0.352, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1949", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_39200f36-86f3-403e-979d-0505ce6dad4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK\n[generic] 3 -> CLICK\n[button] Let's go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.254, 0.201, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1950", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_da452918-14c7-4410-a6e6-4e50951940a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: playstation gift card $10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.056, 0.228, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1951", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_54518298-2aa4-45ef-91ee-ccc0b8c495a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.238, 0.345, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1952", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_f27219e9-c800-4270-9f5d-348090dff023.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.372, 0.644, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1953", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_be1d8ecf-609b-4a6c-9485-2f010f65c215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.42, 0.711, 0.486] in the image\nAnd my action is Action: TYPE\nValue: Wedding Anniversary"}]}, {"id": "mind2web_1954", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_2900919c-d57a-4636-940e-a1013a7efe4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.17, 0.768, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1955", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_68be7878-cac0-4d19-8c5c-ccd542c407d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.008, 0.553, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1956", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_3139a384-73c1-48be-9299-680fcd57a365.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.213, 0.253, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1957", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ec97a061-a130-45c9-9ee0-c0db152698f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.272, 0.739, 0.287] in the image\nAnd my action is Action: TYPE\nValue: 99"}]}, {"id": "mind2web_1958", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_94e90ad5-7d9a-4601-9812-255a72709a36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[section] Flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.43, 0.478, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1959", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_afd88868-e107-4cd8-9da4-f234e5d6a3b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.15, 0.783, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1960", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_613c170f-ebe3-451c-ae18-a3d8ad9c5b0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.007, 0.369, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1961", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_8ea37fac-98e7-436e-ad0c-0264750abc6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.054, 0.28, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1962", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86c95750-be3d-4f61-85ac-8399619f41de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.344, 0.068, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1963", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0b463dd8-3b69-49da-9a8e-de032b2c24ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.791, 0.366, 0.816] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1964", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_f50ba556-898a-4e6f-a470-ce593af6304e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado\n[button] Activity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.312, 0.382, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1965", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_a4fc7924-09c5-4edb-b4e5-a8c733c2942c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.021, 0.491, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1966", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_cac81dd7-bfc2-4d9a-ab71-29ee91f89e40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.331, 0.87, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1967", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_e5f9b638-b724-473f-869d-615c6c141aeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.189, 0.397, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1968", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_90a95512-a3cd-4f4e-8dec-561efd1c11b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.631, 0.158, 0.642] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1969", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_0f0ef35f-c591-41b1-af03-eda24e8e7abd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Camping Tents -> CLICK\n[link] add filter: 6-person(24) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.197, 0.428, 0.217] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_1970", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_6790b27e-ac5d-4b96-9a93-2e5c9e4d7b71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... 
-> TYPE: new york\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.529, 0.747, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1971", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fc8e75d0-f09d-4cf2-a112-2f0184fa48e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.137, 0.388, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1972", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_19a9c862-0926-49c6-aa16-66a8e1138678.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.099, 0.75, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1973", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_77dbcf0c-47d8-4597-abfc-2b218fe292e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[div] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.231, 0.408, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1974", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f3797a4b-3d12-46e3-a420-64ec64f1c501.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.271, 0.408, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1975", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_d8e2d33b-a8de-4eaf-baea-973008afec13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.91, 0.29, 0.952, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1976", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_2330fcb7-1d5f-4a97-b2a2-621ea171fcca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.577, 0.194, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1977", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_0b5688cb-71a1-4fcf-a156-bbac0e95e816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK\n[link] Find Us -> CLICK\n[textbox] search input -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.206, 0.216, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1978", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_53edb01c-5098-443e-bd99-d63dae18684d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK\n[textbox] KOA Rewards Number -> TYPE: 1000000001\n[textbox] Postal Code -> TYPE: 10023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.562, 0.934, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1979", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_e6d6233d-53a8-469e-b68d-dc33eb7a03f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345\n[button] Add flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.466, 0.32, 0.51] in the image\nAnd my action is Action: TYPE\nValue: ian.lo@gmail.com"}]}, {"id": "mind2web_1980", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_90da1c51-47a3-4b4d-be32-7427c7b53fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[path] -> CLICK\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK\n[button] Add railcard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.344, 0.281, 0.365] in the image\nAnd my action is Action: SELECT\nValue: Veterans Railcard"}]}, {"id": "mind2web_1981", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b07726fe-d4d6-4d0d-a101-5bcffd3b52e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.416, 0.319, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1982", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_6bae364e-de11-4195-b886-42576377408b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.037, 0.535, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1983", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_2d7f2b77-ec35-4ff0-88c0-d11be25fb44c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.263, 0.561, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1984", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_d64f4710-22b9-48cd-9649-e2969c135a58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK\n[checkbox] list-filter-item-label-3 -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.157, 0.633, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1985", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_29593e46-ab32-4882-a602-dd9905ebbea9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.337, 0.855, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1986", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_7f3fc81f-dfca-44c6-aa5b-cca862f0d470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER\n[link] Find a Store -> CLICK\n[combobox] Find a store -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.114, 0.668, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1987", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_64b1e75d-0c6e-4f23-b134-1f8115a9bf31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK\n[heading] Long Island Rail Road & Metro-North Railroad -> CLICK\n[div] Long Island Rail Road schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.591, 0.275, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1988", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_83a1c672-32d5-41fe-9c95-86a1ac14c208.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.128, 0.187, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1989", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e11f907e-6778-4c4f-830e-df9acf69eaaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Gaming Monitors Accessories -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.055, 0.49, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1990", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_a94e1a10-31d7-4c5d-8020-06c9229283b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK\n[div] #virtual -> CLICK\n[label] -> CLICK\n[div] How to Make Six-Figures as a Consultant or Coach -... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.244, 0.575, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1991", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d72858b-a481-4bd0-bfb7-e2556ccf7ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.01, 0.546, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1992", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4f567ffc-1405-4110-89d9-9b0671eb7202.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.386, 0.384, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1993", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b7659f78-46a9-4951-952b-37365caa2ab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.258, 0.249, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1994", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_57ff6313-097f-456c-9fcd-a58f3e099011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.297, 0.133, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1995", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1abebad8-af94-4e45-880b-8bc9dd0bb103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.281, 0.95, 0.319] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_1996", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_7be19ef7-3aff-4a44-8e6e-27ddc4be533a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.209, 0.881, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1997", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_4e8c633f-7da3-4beb-afea-a194df00dcc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.18, 0.375, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_1998", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_f83bc69c-b77b-4683-998f-5d9e4694add3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK\n[button] Contact -> CLICK\n[button] Contact the organizer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.106, 0.692, 0.127] in the image\nAnd my action is Action: SELECT\nValue: Question about the event"}]}, {"id": "mind2web_1999", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_744fc34d-8efe-4c20-96ee-05cad5df1cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.198, 0.335, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2000", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_9ede236e-1cc6-4750-882c-9d9e807b32a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.713, 0.087, 0.735] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2001", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_c3442abe-d676-4250-9bb2-7fab9a09ab8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.241, 0.691, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2002", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_3ebb1cf3-9b8f-4c1b-9da5-6ae7225dff0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.341, 0.48, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2003", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_1adae572-b7a8-479b-8e02-5cff5c0f35b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.134, 0.171, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2004", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_2adf11c4-9ff9-460f-932d-fafc19f37981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.172, 0.612, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2005", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_95928075-0682-411b-bc35-436756ed5eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] List Explorer -> CLICK\n[link] Manage My Lists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.252, 0.046, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2006", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_41574158-9bb5-445b-8eb8-e3bd3ffc02bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.095, 0.448, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2007", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6b3a070-2485-4f86-bfd3-55de0ad13052.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] Motel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.12, 0.633, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2008", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_74a7c5bc-3967-4777-8fbf-48549de950af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[button] Remove -> CLICK\n[textbox] Flight origin input -> TYPE: Mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.126, 0.573, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2009", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_b5929444-c8ea-4c84-aaa2-f91432a827fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.168, 0.509, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2010", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_28dfe420-b64d-4b66-a40a-50cb80c95ac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.546, 0.285, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2011", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_1915589a-bee5-4557-82db-5244bdd93e0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.222, 0.652, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2012", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_d89d6e35-b522-4916-a7d8-8dd1410634bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.308, 0.748, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2013", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_05abe37e-9ee1-4f51-a521-2ea404b58e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.313, 0.248, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2014", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f7e594b5-3cc2-4b2e-8820-2dee88a6a1f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.232, 0.795, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2015", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_9d608d6d-b482-4f2a-8241-567f41501af3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes\n[link] gingerbread cakes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.227, 0.175, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2016", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_3a4e82f3-9dd3-42b4-9302-c5e41465df9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.689, 0.263, 0.705] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2017", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_bc0fdec2-b755-425e-b766-c7376a85bd3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.126, 0.874, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2018", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_50ad5ebd-d46d-4c0a-ad59-f00475a2a57d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK\n[span] 5 -> CLICK\n[span] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.594, 0.912, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2019", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_f49a5c09-68a8-4d43-9871-746acda3a89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK\n[link] High rated -> CLICK\n[link] Far Cry 3 - Ukulele Girl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.101, 0.98, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2020", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_2b14fe2d-795e-420f-a424-5a0246897456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[div] Today -> CLICK\n[button] March 10, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.383, 0.35, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2021", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_a91d399d-3343-457b-b60a-9d9d2ec0676e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2022", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_af8eb390-8d1e-4b3a-b5d1-a3401025320c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.099, 0.525, 0.119] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_2023", "image": {"bytes": "", "path": "./images/1d738d01-507e-46ff-8d4d-d4a7dffed936_cacb9fb9-a747-403f-87e6-1d720ec9c876.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the weekly chart-topper artist and play the number 1 track of that artist after liking the track.\nPrevious actions:\n[link] Charts -> CLICK\n[link] Weekly -> CLICK\n[link] The Weeknd -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.39, 0.171, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2024", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_d82e3a62-a555-4e74-8436-dd6204ae1de5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.16, 0.107, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2025", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_023d2f51-86fa-446b-8c75-47ad3f0c4643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.109, 0.713, 0.121] in the image\nAnd my action is Action: TYPE\nValue: Athens"}]}, {"id": "mind2web_2026", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_3b8bb1e4-be14-49ff-b042-11d4639daa4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.376, 0.132, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2027", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_2cb27e58-8e3f-4926-a34d-f9fdefebe672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.59, 0.279, 0.64] in the image\nAnd my action is Action: TYPE\nValue: doha"}]}, {"id": "mind2web_2028", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_18f4c557-d4ee-491a-96af-1a5bc2509a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: creed III"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.047, 0.657, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2029", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_46135cac-6070-4b3c-a706-4ba121a6e9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Casino -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.724, 0.089, 0.735] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2030", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_3ab27e3b-370d-41bd-af2e-4cabd704a0c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.095, 0.57, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2031", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_e41e3aa0-0593-4975-bd96-9add53085830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.596, 0.064, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2032", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_f493650e-0cf6-4904-89e7-62105a3d029a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $21.99/Day -> CLICK\n[checkbox] $13.00/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.311, 0.93, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2033", "image": {"bytes": "", "path": "./images/f3850ec8-bf7c-42c3-9469-457836914f77_b1bde9b8-0c8d-43b4-b47e-f36805bd8fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for events in Boston.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.094, 0.782, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2034", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_cec37f28-5752-445c-a4e5-68017784fcc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.136, 0.418, 0.178] in the image\nAnd my action is Action: TYPE\nValue: ohio"}]}, {"id": "mind2web_2035", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_61a2a3af-3b29-4d5d-b252-856c6a60c022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.057, 0.326, 0.09] in the image\nAnd my action is Action: TYPE\nValue: pet festival"}]}, {"id": "mind2web_2036", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_2f4fed09-a787-4ec5-8706-4efca121d6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.052, 0.179, 0.258, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2037", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_8b6a9853-063e-4fc6-82da-0f226ba3679f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.03, 0.88, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2038", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_d6fc3568-7f65-4ebd-9102-c451c4285736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK\n[button] Heartland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.415, 0.157, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2039", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_bdb53510-a545-40a0-a881-f19507ac47ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.396, 0.31, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2040", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_71b2d6dd-b624-46d6-9dbe-46939c0d6916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.06, 0.327, 0.074] in the image\nAnd my action is Action: TYPE\nValue: ZURICH"}]}, {"id": "mind2web_2041", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_33e764b3-9adf-43f0-9086-44dfb1bd8160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium\n[span] SoFi Stadium -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.315, 0.379, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2042", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_c6894a9d-3c38-4df4-b21f-e4135fb0b585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.016, 0.461, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2043", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_89b591ba-9b75-494d-8261-e69acb082d04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.552, 0.884, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2044", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_84c808ae-d79f-4884-8b59-0ae14f0dad91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.188, 0.252, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2045", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_7a75dd20-71f5-40d8-88b2-b7f9ee035f48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.067, 0.46, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2046", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e4847f0e-0607-4eb7-b856-7bbcd4c7dde0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.679, 0.55, 0.841, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2047", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_4f07778f-b3c5-486e-ade9-13d279de1d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.457, 0.834, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2048", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_e8593956-fe5c-4517-8903-06508cece040.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.262, 0.171, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2049", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_0aeb38ea-3cdb-4e1d-95c9-4d8d93b7a0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.576, 0.263, 0.595] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2050", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_0d1e50fb-654d-455c-96a3-27dd3238b205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.159, 0.407, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2051", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_b376aa10-6957-4130-b75f-17abc80fd6f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.209, 0.792, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2052", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_21b5581f-18db-4928-9246-ddbfa5e1bc60.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.326, 0.58, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2053", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_8e1be294-c865-4017-b9c4-d0039658abe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.081, 0.27, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2054", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_b0af8890-c5ec-4c3e-b40e-069dcdbb91e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.0, 0.482, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2055", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_bc81411f-ea59-4192-b04d-e62b85850b5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK\n[checkbox] 29 March 2023 -> CLICK\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.269, 0.927, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2056", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_10934fcf-a23e-435c-9eed-281e77c57f18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[checkbox] Based On Comic Book (226) -> CLICK\n[strong] IMDb Rating -> CLICK\n[group] IMDb user rating (average) -> SELECT: 7.0\n[group] IMDb user rating (average) -> SELECT: 9.0\n[strong] Refine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.127, 0.54, 0.137] in the image\nAnd my action is Action: SELECT\nValue: Number of Votes"}]}, {"id": "mind2web_2057", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b1d71ec-5dd5-4948-a2b5-6303bf701bf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.174, 0.286, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2058", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_b874a73e-65d0-4078-9549-063b0402b53d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.349, 0.643, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2059", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_f53535ac-ee85-47f4-9e60-9d64b5ce8005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.169, 0.365, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2060", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_3cc263af-93ef-413b-a357-8826b6929b8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[select] Sort by Distance -> SELECT: Sort by Price\n[heading] 335 Columbia St. 
- MOTORCYCLE PARKING ONLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.674, 0.819, 0.711] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2061", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_ffc5cf9b-30d3-4b1a-8a02-875d0aef04df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.129, 0.697, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2062", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_fc4260f0-f7dd-44e3-8e76-33f0a7a4c96a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.142, 0.495, 0.166] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_2063", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_33fe3c9c-2201-4208-b663-d6bc5160c097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.049, 0.128, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2064", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_41b67b58-9eb3-401d-b495-ef5e61eca310.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.051, 0.77, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2065", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_230d03bf-d64d-47b0-a803-2d9e20684510.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.22, 0.387, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2066", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_bcafc9b0-d8a1-4b7e-8199-431d48b1765e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.232, 0.115, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2067", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_00c2eafc-0309-4341-9e34-ea1868d3867d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.258, 0.05, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2068", "image": {"bytes": "", "path": "./images/6da08512-9375-4b47-8cd1-addec58f385c_6ce0bdac-5180-4167-939f-a6fc87f8c8e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find musical instruments and filter the results to show only those that are available for free local pickup.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.03, 0.652, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2069", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_a3aeb3e8-0034-40d6-b184-86ab3f05d619.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.171, 0.438, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2070", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_7d161c6f-4df1-4eed-8b1b-3c1d368a1694.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.341, 0.148, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2071", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b7fd1e38-d007-46cd-ae33-f560d075b56b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... -> TYPE: 1\n[combobox] undefined Selected 1 room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.813, 0.339, 0.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2072", "image": {"bytes": "", "path": "./images/64051efe-53dc-4e79-9980-c3d75d34c4aa_7aa12e31-3e99-44a8-82c2-471c8e11d629.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my item inventory.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.151, 0.481, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2073", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_93810b4c-013b-4165-94ac-140b40837aed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.197, 0.091, 0.214] in the image\nAnd my action is Action: SELECT\nValue: 2019"}]}, {"id": "mind2web_2074", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_1ebf5cde-2bf7-47a6-8c3f-b567ff20ba4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK\n[link] 10 (131) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.466, 0.974, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2075", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_0eff6738-107d-4395-8229-d5632a45aedc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.222, 0.636, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2076", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_c0b15fcf-8ad7-47dd-85a9-2ee548ce72d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.028, 0.178, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2077", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_4f574815-5359-4ee4-95ac-4dd90be90835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.105, 0.673, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2078", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_31a26c55-4c7a-4283-92d7-1653956d7fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.268, 0.868, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2079", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_5be2112f-8d62-404e-8ab3-6202c78c3536.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.381, 0.281, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2080", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7dde899e-d348-46d1-90db-7e248ce0bf50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.139, 0.646, 0.176] in the image\nAnd my action is Action: SELECT\nValue: 1 Child"}]}, {"id": "mind2web_2081", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_a499bf10-f7b8-4771-8234-002fd88c3439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK\n[heading] STATS -> CLICK\n[link] GOALS Jamal Musiala Jamal Musiala 11 G -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.159, 0.678, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2082", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_d8c9ea80-5e0b-4dda-bb9c-d6c5b512622b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.324, 0.196, 0.354] in the image\nAnd my action is Action: SELECT\nValue: US$20 to US$40"}]}, {"id": "mind2web_2083", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_3ec075a2-b4a2-41b7-80ab-fa807aac5c9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.294, 0.539, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2084", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_9fd66fb7-b3b9-44e0-8279-072992a676c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.321, 0.93, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2085", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_898d9963-7fbd-4ab4-9300-01d6fb45ca32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.174, 0.471, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2086", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_6a1a9c2a-d65e-46be-92cb-b0a2527d8d6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[link] Camping Tents -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.238, 0.127, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2087", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_1451ea0d-a18c-48d2-a5fe-55d780698313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.194, 0.709, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2088", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_bbd16dc3-1b95-4fc5-b68a-ff2a7e6cfb95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.572, 0.237, 0.654] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2089", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_6a4017cd-86da-4732-92f2-308cdbaa27f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[link] Business -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.176, 0.247, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2090", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_ad40c605-21b9-4aef-a231-fa346f287afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: Prometheus\n[div] Prometheus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.316, 0.942, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2091", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_7b6615cd-da39-41f9-a701-e6becbf3bdaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2092", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_0f8ca7c0-8ab4-4a9a-b0e1-3a10056f7f2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.517, 0.648, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2093", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_dde39bfa-ddfb-4dab-91e3-1f242a32d253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.236, 0.845, 0.251] in the image\nAnd my action is Action: TYPE\nValue: north station"}]}, {"id": "mind2web_2094", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_71b4f18e-103b-420c-8bcc-da6f09c0d8cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.123, 0.488, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2095", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_71bf576e-88fa-448e-bcf2-cfefb6a34fba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.085, 0.257, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2096", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_c736856e-e26a-4537-b3b8-82969aa2a016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.309, 0.955, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2097", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_f4a3db2b-6081-40ff-9efd-57848cd9bfd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.083, 0.033, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2098", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_dc4596ac-20df-47d4-97db-d42b1c289351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.023, 0.564, 0.037] in the image\nAnd my action is Action: TYPE\nValue: SAN FRANSISCO"}]}, {"id": "mind2web_2099", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_477ae1d7-9f1d-45a1-9447-6dbd34e2ec6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.885, 0.263, 0.897] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2100", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_439cfa5f-34dc-41f4-b19d-ef6a9cbae5c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[img] Sports car icon -> CLICK\n[button] Sort by -> CLICK\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.21, 0.249, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2101", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_b58ca5a0-af78-4288-9f0d-78f2c0f18b1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[div] & up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.883, 0.192, 0.896] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2102", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_4a9ffe3e-66a6-4eab-a124-a5d40e3594c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.713, 0.681, 0.752] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2103", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_fb6287ce-8359-4fc4-872d-a66acc862823.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK\n[span] Walgreens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.111, 0.481, 0.163] in the image\nAnd my action is Action: TYPE\nValue: Walgreens"}]}, {"id": "mind2web_2104", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_4888c4f0-14f5-4277-80ee-930c07442426.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.13, 0.072, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2105", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_86b72b03-cfd1-47ce-9f4a-1dbb46866645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.274, 0.811, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2106", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_428b2ffe-86ef-407a-9079-cfec97b80000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.432, 0.447, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2107", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_3eacf133-cbe2-43ec-8bfc-839b3bc960e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK\n[link] Uncharted: Legacy of Thieves Collection - Wiki Bun... -> CLICK\n[link] Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.349, 0.348, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2108", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_f72407ee-c156-4ee0-b5f9-8f08171c28e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. 
The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.063, 0.387, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2109", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_fa548110-f9a3-4ec4-a642-bb38488e1d13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.86, 0.161, 0.878] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2110", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_cd8f79bd-6ddb-40ff-b87c-d4c5e4dc6829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.333, 0.112, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2111", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_e4dfa148-ff80-4458-99ca-8d1c48572e37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.008, 0.546, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2112", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_48ff5ffc-bf68-41ad-b37f-e0470a5754dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK\n[textbox] Date use format: 16-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.29, 0.327, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2113", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_63c79386-dc4e-4073-b094-76e6bb7cb672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.21, 0.477, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2114", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_d7821dba-fdc5-4738-ae2a-d5bf94da0dcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.013, 0.509, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2115", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_f1411a49-e617-4dfa-aaa2-a947056f2ceb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.523, 0.353, 0.582] in the image\nAnd my action is Action: TYPE\nValue: 1234567890"}]}, {"id": "mind2web_2116", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4afe1528-c021-4d9b-8a67-b889a015436a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.154, 0.351, 0.172] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_2117", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_dacb1820-0368-4992-843d-496d69231c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.09, 0.99, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2118", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_48484940-7fda-45e8-a3c9-21da6c24a342.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.525, 0.091, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2119", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_4d006658-126a-4f2d-bfef-45ca970c91a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK\n[link] Harry Potter and the Cursed Child - Parts I & II -> CLICK\n[link] Add to wishlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.127, 0.716, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2120", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_6f196ecb-8ab6-4aba-82fe-6c3a0041637c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK\n[label] Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.389, 0.056, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2121", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_c2bc0783-09d4-44dd-b45f-ba953a1a7a08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[label] Air India -> HOVER\n[button] Air India only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.001, 0.176, 0.284, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2122", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_3db12611-f0a1-49e3-8ecf-cbdc23b3a727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.266, 0.377, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2123", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_5e2ba778-2e6c-44e0-a6f2-e28df0337e1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Rating -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.546, 0.434, 0.669, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2124", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_4618ccf0-6373-4138-aaab-c3e1e86094bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> CLICK\n[spinbutton] Flight number , required. 
-> TYPE: 6944"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.356, 0.62, 0.389] in the image\nAnd my action is Action: SELECT\nValue: Thursday, April 6"}]}, {"id": "mind2web_2125", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6290e759-d66d-4b88-9146-822223ef0530.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.252, 0.218, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2126", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_52bdee9e-0430-43f8-a614-9b708c175125.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30\n[combobox] AM or PM -> SELECT: PM\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.257, 0.874, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2127", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_360bdc21-2f56-4e3c-a631-3f81d3908dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.659, 0.62, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2128", "image": {"bytes": "", "path": "./images/5e01c2f7-c63b-4826-98a3-a18f731e16ea_3490d209-2313-4fd3-80fd-52801298b816.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse tickets for Chicago Bears games.\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.091, 0.319, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2129", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_90f0dbff-5c71-40dd-aa7d-a28f941b2827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.446, 0.188, 0.471, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2130", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_3c6124d5-eeda-47c8-b0e1-bf30cf61aca1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.232, 0.281, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2131", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_6179284c-8010-42fc-9db7-c552407fe3b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.198, 0.753, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2132", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_698c355e-5508-4e32-a78a-33f097743f70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.376, 0.188, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2133", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_851a313e-37e4-42fa-9dea-af461112eeed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.287, 0.3, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2134", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_a5fb29c8-6ada-490f-9b03-3c28febc5b78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK\n[span] London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.141, 0.194, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2135", "image": {"bytes": "", "path": "./images/9a462751-758e-42bd-967d-373c13b90382_3b89323b-0233-4588-b13e-f2ff2621ba0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current injuries of Phoenix Suns players.\nPrevious actions:\n[link] NBA . -> HOVER\n[div] Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.236, 0.306, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2136", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_4a1fc4f9-6541-4a12-b6c5-7228811c43d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.512, 0.266, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2137", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_d0dd6fda-cd1e-4d3e-b3a5-67611bb74e68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK\n[button] Sydney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.021, 0.423, 0.047] in the image\nAnd my action is Action: TYPE\nValue: pasta"}]}, {"id": "mind2web_2138", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_9568b250-989d-4e11-a9e7-4b0dd6772a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.146, 0.358, 0.166] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_2139", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_d13bb605-b91a-48d6-a6cd-a915bf50dc3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK\n[button] 43235 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.265, 0.754, 0.284] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_2140", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_6434f7c3-76f6-4375-a6fa-3179c23dd6cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK\n[link] BY PHONE\u00a0NUMBER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.351, 0.328, 0.379] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_2141", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_61d753a4-9e3c-4329-96bd-932d046f6f53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.689, 0.699, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2142", "image": {"bytes": "", "path": "./images/9f4b6bd4-69dc-4103-b1c4-eb621639e9dd_77e8e128-c9e5-48a8-a691-331ce9696c14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of publishers for board games\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.005, 0.184, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2143", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_b4e17a34-b113-4740-a22f-b3d783bf549c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.367, 0.559, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2144", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_48e48dce-c73e-4ccf-86e0-9aa26363e0e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543\n[input] -> TYPE: John\n[input] -> TYPE: Green"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.078, 0.934, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2145", "image": {"bytes": "", "path": "./images/85bd1881-6efd-458d-97c9-ae507ecba1ca_4ef4bad0-ed14-4946-8d06-4d672c0f9bdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the odds for upcoming NHL matches.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] NHL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.095, 0.604, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2146", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_3b5e5e0d-4c51-489f-84e5-48d3c9e81a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.048, 0.777, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2147", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_aa293e50-9e36-4097-9e71-1a21249be4a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.203, 0.991, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2148", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_9abdee22-f71a-427a-a5b1-4ed0386a1de5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.309, 0.253, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2149", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_724a9c62-8906-4da6-afd7-50a4c3a8864d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.352, 0.868, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2150", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_b7d1099e-22d6-4680-b1cc-95da2374335d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.23, 0.695, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2151", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_ed70e106-6213-44c9-ae88-85dc9af09e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.233, 0.433, 0.272] in the image\nAnd my action is Action: TYPE\nValue: Accounting"}]}, {"id": "mind2web_2152", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_38c4b245-e414-4cce-b837-42706cb27f23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.24, 0.62, 0.268] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_2153", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_a30dba5b-dfd0-4cef-a4b1-2a1fe4a13829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[div] 5 -> CLICK\n[div] 7 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.233, 0.264, 0.253] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_2154", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_797b5624-5baf-4735-a64a-a49edb4a6914.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK\n[button] Find flights -> CLICK\n[button] Roundtrip $681 United Economy (U) Select fare for ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.314, 0.497, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2155", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_aec70830-4ba3-44b3-9aff-3d5399090ad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.363, 0.255] in the image\nAnd my action is Action: TYPE\nValue: Abbotsford"}]}, {"id": "mind2web_2156", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_0c3f522b-a492-4b58-b642-8899445f2ac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[svg] -> CLICK\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.131, 0.804, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2157", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_b2723760-8071-47cb-9f11-2d675175cbe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK\n[strong] Filters -> CLICK\n[checkbox] Motorcycle Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.465, 0.328, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2158", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_69539b1f-4a50-4fd5-9700-d3406bff509d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.019, 0.645, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2159", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_b4aa56db-e3ef-4719-8221-e887d800b895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.288, 0.498, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2160", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_4eabbe64-62d2-454b-bad9-12f4206627dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.138, 0.273, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2161", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2c8d72ba-69ce-4d1c-bfdf-192d600a3e99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[li] Street Taco, Broadway, New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.316, 0.384, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2162", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e9bb1cf2-1d7f-41d7-a17c-ebf215c4e011.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.152, 0.326, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2163", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_316b6826-d405-4b45-9723-8fd585ef7722.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.721, 0.211, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2164", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0ed3f86d-df38-429e-90d1-7fafffec69cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4\n[listitem] 4 -> CLICK\n[button] 04/12/2023 -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.181, 0.811, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2165", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_0ffc571f-2b31-4854-8ed2-2f542d6baa0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.248, 0.383, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2166", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_6e7e019b-e6dc-486a-9697-74aa496d4009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.211, 0.729, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2167", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_be3483db-df81-4a20-b60c-360fa9beb6f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.023, 0.353, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2168", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_d1ce353f-b6b7-4181-b6be-9430a19a75d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.285, 0.803, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2169", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_47029a14-ef8d-4d8e-89a6-9d7a672a7f00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.305, 0.48, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2170", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_a91d01fe-afc1-4e3d-94fe-fd6f02b955af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.111, 0.554, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2171", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_85de58d9-3241-44fc-be41-d50c28190e22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.182, 0.249, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2172", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_a3d9acf3-3ec0-4d6e-bcb3-2ae36600edbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\n[combobox] Search -> TYPE: gloomhaven\n[link] Gloomhaven (2017) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.39, 0.26, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2173", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_022d62a7-4416-402b-a696-356bc1b74b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.228, 0.598, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2174", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_5bb4feb3-f367-4e03-b999-c2007de19ba7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor\n[button] Submit Search -> CLICK\n[button] Jerry Trainor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.801, 0.098, 0.819] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2175", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ab92e162-cd39-4288-a702-dc080854bd00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.028, 0.931, 0.044] in the image\nAnd my action is Action: TYPE\nValue: Kevin Durant"}]}, {"id": "mind2web_2176", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_a41fc74f-1dd6-4352-a2f5-1d7bd52e9cfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[link] Popular tracks -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.131, 0.695, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2177", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_a2babf92-d02c-488b-b82d-e051319ca1f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.413, 0.691, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2178", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_d379fe7d-7df4-47a7-9759-adc5e3551cec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.219, 0.355, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2179", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_a92ff28c-7818-4e0d-b705-d0b8c171af63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.717, 0.263, 0.734, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2180", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_0675f7cc-293d-46a5-a6ab-9f38810c1376.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.101, 0.128, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2181", "image": {"bytes": "", "path": "./images/c53a4a47-d155-42fc-b9a8-6b37d092d9aa_f10d0566-f01d-41e1-a9e1-1838fb425783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the release date and supported platforms for the game \"Elden Ring.\"\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.112, 0.677, 0.131] in the image\nAnd my action is Action: TYPE\nValue: Elden Ring"}]}, {"id": "mind2web_2182", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_8336d7c7-ca0a-4911-a354-f8a00a547a1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.182, 0.914, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2183", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_771e94b4-061c-4b4f-9c67-23671ef91e2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.095, 0.825, 0.118] in the image\nAnd my action is Action: SELECT\nValue: Price low to high"}]}, {"id": "mind2web_2184", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_90d8d6e0-addf-4d9c-afc8-61048c824a2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.238, 0.114, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2185", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_4a07dc77-6047-4b53-8808-a53fbd47e6a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50\n[input] -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.258, 0.978, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2186", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f617e4ee-1c75-459f-a9a8-f9edf83abf98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... -> TYPE: Abidjan\n[strong] Abidjan -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Accra\n[strong] Accra -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.275, 0.94, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2187", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_f22832a0-47d3-4e90-8f12-3060a88514cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.286, 0.138, 0.337, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2188", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b28fb6d0-74c2-492a-9834-7c55b317bc16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK\n[button] Show 18 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.27, 0.191, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_2189", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_5e18d747-451a-47e9-a2b9-4bbeffaf596b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.505, 0.421, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2190", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_2e5c3ba5-79d1-47e9-a404-03e9aa72be31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.225, 0.618, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2191", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_b59fa278-4a1e-4869-866a-137ca9046aa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[button] This weekend -> CLICK\n[link] See more -> CLICK\n[label] -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.717, 0.065, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2192", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_556ab0b3-e0cc-495c-a76a-93f9487d41a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.354, 0.369, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2193", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_f254e9fa-ac68-41f4-97fb-e721299de39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2194", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_e604833c-a61d-4011-9ada-fc0b36437ac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.006, 0.984, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2195", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_903f6b4d-3315-47b1-b88d-15cd49d43bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.941, 0.007, 0.991, 0.014] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2196", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c3039d57-1d54-4442-b250-b233b580fd64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.282, 0.307, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2197", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_6f2a987a-c636-4917-a2c9-d0396c21a1ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.225, 0.983, 0.248] in the image\nAnd my action is Action: TYPE\nValue: Taylor Swift"}]}, {"id": "mind2web_2198", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1401227d-f0de-44de-86fc-af5ba0c6c520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.011, 0.578, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2199", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_734808d6-57d3-4ae8-98c1-b2f33ee8aef5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK\n[label] Limited Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.632, 0.056, 0.65] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2200", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_e71abc2c-4ffb-460e-ba94-f76587391fc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.734, 0.102, 0.969, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2201", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_62ad40a1-3e77-4bb7-bced-d863ee082eb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\n[path] -> CLICK\n[link] Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.539, 0.276, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2202", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_0bcc192a-b80b-485c-a8f5-66deacb89805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.389, 0.905, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2203", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_1a497e83-83d0-4ccb-ae4d-22ec497edc64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.035, 0.966, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2204", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_c45ca820-5c91-49f7-8eae-05462119775d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.402, 0.514, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2205", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_fda96947-caa3-40a5-8f47-4413cf7cc0cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.272, 0.233, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2206", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_e34a10ea-d14a-452f-a318-785adaca157d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.262, 0.094, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2207", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_5ca5d0ff-3a34-437d-bee2-1fac238301f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK\n[link] Athletic Shoes & Sneakers Athletic Shoes & Sneaker... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.163, 0.986, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2208", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_417ecefd-898d-409e-b06a-fedebbcfd761.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.164, 0.644, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2209", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f72fce46-9cea-405a-bda3-7fa5b65f08b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.107, 0.487, 0.134] in the image\nAnd my action is Action: TYPE\nValue: Thomas"}]}, {"id": "mind2web_2210", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_36ac0711-a68d-408f-b2eb-1451647e0fe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.122, 0.834, 0.134] in the image\nAnd my action is Action: SELECT\nValue: Price, low to high"}]}, {"id": "mind2web_2211", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_449b0be6-d463-4adf-bc5a-5a7c1173d402.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.011, 0.559, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2212", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_24838af0-91ab-40e0-808b-4f59a031f1dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.18, 0.438, 0.197] in the image\nAnd my action is Action: TYPE\nValue: north las vegas"}]}, {"id": "mind2web_2213", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b33f3bf0-ed3d-4894-92b0-3f5a17a350c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.137, 0.673, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2214", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9ae9ccd0-147e-4ce8-881a-79d4b3d8f717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.524, 0.645, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2215", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_18f61a73-f84b-4cb1-a2f3-7b6865a53d80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.24, 0.808, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2216", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_6d12a2b4-31a8-4534-97a2-6c84c75d3fad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.149, 0.967, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2217", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_ec287a0e-e011-48ba-b37d-0ed8176625e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.172, 0.828, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2218", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_258c60a2-eebd-485f-86e6-890e57a66ec3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.372, 0.875, 0.41] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_2219", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_78412c79-dfb9-4973-9e9c-c241d9af03fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK\n[textbox] Search for parking -> TYPE: Atlanta International Airport\n[li] Atlanta International Airport, Spine Road, College... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.44, 0.372, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2220", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_9d37eb8e-33e6-4a01-b91d-82a919ed0da2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK\n[option] Premium economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.426, 0.478, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2221", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_1cdaba22-bf16-40a4-a417-9191b610019d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[heading] Used 2000 Ford Mustang GT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.084, 0.854, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2222", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_1429b238-de97-4a6a-a14d-c14ac1c47e7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK\n[textbox] When? 
-> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.134, 0.266, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2223", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d7c369dd-f0a0-4296-b42a-21e848626295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.214, 0.956, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2224", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_1fdaf4a6-f9af-477b-a6ad-d549a923e148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.079, 0.478, 0.099] in the image\nAnd my action is Action: TYPE\nValue: Las Vegas"}]}, {"id": "mind2web_2225", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_ed78af31-521a-4b45-b4f7-9b09e5b15a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.099, 0.32, 0.116] in the image\nAnd my action is Action: TYPE\nValue: california"}]}, {"id": "mind2web_2226", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_4ff772da-2257-42ff-8770-abeb5ee76e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: SE4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.279, 0.264, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2227", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_057168d0-10f5-478b-88de-40fc2f2a1544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.076, 0.961, 0.103] in the image\nAnd my action is Action: TYPE\nValue: atlanta georgia"}]}, {"id": "mind2web_2228", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_ffe46fa1-15a2-44a5-8017-0b39e33fa3b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.389, 0.588, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2229", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_2a4fda62-04d0-4158-8033-5c8be0ba3f71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.288, 0.344, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2230", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ffa34a7-0378-4a31-9367-019e2fa0115e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.347, 0.268, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2231", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_3bf9fc1a-cc1f-4276-a11a-485dc45a4eab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK\n[listbox] hour -> SELECT: 17\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.145, 0.925, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2232", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_06c977cf-f2d5-40bc-ab31-453cdb6412b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! 
below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500\n[link] Show 684 stays -> CLICK\n[path] -> CLICK\n[textbox] Name -> TYPE: Togo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.28, 0.703, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2233", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_6cab71d0-6ddd-4214-a367-31723a534fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.26, 0.5, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2234", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_211ae34b-54e9-4b42-acbb-df977fb6dba6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK\n[select] 1900 -> SELECT: 1995"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.427, 0.513, 0.508, 0.541] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2235", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_24cd8cb6-fad4-4840-a423-b2bf2ce4de58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.022, 0.277, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2236", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5cf65818-7dae-4713-b976-169a11e7b498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.405, 0.418, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2237", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_aa137e85-cbce-4920-89d7-24cb550fbf81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.32, 0.256, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2238", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_1f1cfcc3-ec16-49ed-83bd-4b388b1948a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.028, 0.324, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2239", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7a476beb-eca9-48bf-abb4-286b0d996196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.552, 0.192, 0.565] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2240", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_aefbb363-74ca-43d6-9af5-1ed82db20b26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK\n[link] Find one near you (opens in a new window) \uf301 -> CLICK\n[combobox] Find a store -> TYPE: 60540\n[option] 60540 Naperville, IL, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.133, 0.352, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2241", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_b1e774ca-1b61-47d4-bcaa-5e233a773cdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.326, 0.29, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2242", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_4074b1d5-90b6-4f54-a8bf-80233191ff1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.638, 0.114, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2243", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f5ffe0d3-f3c5-4f81-800e-d00052a64734.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... -> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.142, 0.546, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2244", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_b63cef7a-d7c0-4fd6-a051-17bf4be350e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK\n[button] Select fare -> CLICK\n[heading] LKR\u00a03,125,932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.521, 0.48, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2245", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_8bd2ba25-f3c9-464c-8943-020e48f3c1c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Rap / Hip Hop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.224, 0.655, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2246", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_a7f466e8-470a-4460-b10b-38a6ab84458c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.012, 0.45, 0.019] in the image\nAnd my action is Action: TYPE\nValue: Oakland, CA"}]}, {"id": "mind2web_2247", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_7c5a9c22-a130-4225-84ab-c661784863e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.861, 0.077, 0.9, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2248", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_2f2426db-0cca-4e67-a6b0-333de7a4b037.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK\n[textbox] Apt, floor, suite, etc (optional) -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.325, 0.702, 0.389] in the image\nAnd my action is Action: TYPE\nValue: Buck"}]}, {"id": "mind2web_2249", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_f29b8470-f482-4097-a3c0-014d0ea77cb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[textbox] Near -> TYPE: WEST HOLLYWOOD\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.124, 0.632, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2250", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_a5bc6789-f1f0-4aeb-9457-993e852ff0fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK\n[span] -> CLICK\n[label] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.305, 0.916, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2251", "image": {"bytes": "", "path": "./images/0b2c1886-0345-4ca7-a555-8b1cd1b1c40a_31825227-163b-4f9a-8253-81e49cd90371.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse certified fresh sci-fi films streaming at home.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.233, 0.587, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2252", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_72041631-5eab-482b-9413-7c3553541926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.5, 0.292, 0.573] in the image\nAnd my action is Action: SELECT\nValue: Delivery"}]}, {"id": "mind2web_2253", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c207e1a1-c5a2-4a3c-aba7-4bf4e98d6829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.019, 0.491, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2254", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_d95c30f0-a89a-4c2b-ac95-293e0904cf22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\n[button] Travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.128, 0.377, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2255", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_20dccc2d-feeb-4527-b6c6-8873b4b1f8f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.0, 0.482, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2256", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_398e93e6-b97d-4290-846d-dc5d8ec462a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[link] Restaurants -> HOVER\n[span] Thai -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.016, 0.564, 0.025] in the image\nAnd my action is Action: TYPE\nValue: WESTMINSTER"}]}, {"id": "mind2web_2257", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_8c6d633b-d81f-42ce-98c8-0704f88dd95e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.316, 0.256, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2258", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_e8482e40-59db-4fc3-aa34-8f93399de23b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.208, 0.463, 0.235] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_2259", "image": {"bytes": "", "path": "./images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_0dc5da7a-7c42-43ad-8c6d-1270e1186f5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the latest news from rotten tomatoes.\nPrevious actions:\n[link] NEWS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.089, 0.91, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2260", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_222cd8db-5718-4f2a-9fe2-93b144ba93cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.385, 0.192, 0.411] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_2261", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_e209fc5a-4d34-43aa-88dc-898fc2cb3c9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\n[button] Camp & Hike -> CLICK\n[li] Hydration Packs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.147, 0.119, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2262", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f3d13de0-6b97-4f7e-acb5-77fc953b68f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK\n[generic] Brand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.337, 0.34, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2263", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_2211b437-d9e0-41b7-a052-7d3867619be7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.819, 0.83, 0.847] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2264", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_7d152147-a44e-4294-bc01-98b93f05e570.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.804, 0.192, 0.816] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2265", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1f2e7cce-dfb0-4d72-82ce-64467ec3600d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.119, 0.453, 0.14] in the image\nAnd my action is Action: TYPE\nValue: Aquarium of Paris"}]}, {"id": "mind2web_2266", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_bc0cace1-a724-4637-933a-587043f890c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.208, 0.808, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2267", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_391b3a9a-8396-4709-86c0-7d88ba2b43e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.014, 0.05, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2268", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c507a159-e69a-4a8a-9f3a-64cb387e850e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK\n[button] 04/11/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.415, 0.626, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2269", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_232b3998-bdde-46ec-839f-e1ddcd632443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.246, 0.378, 0.272] in the image\nAnd my action is Action: SELECT\nValue: Climbing"}]}, {"id": "mind2web_2270", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_d4851253-54fd-4a8e-bfee-8b3e7448733f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan\n[textbox] Search -> ENTER\n[span] Michael Jordan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.172, 0.126, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2271", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_2add8689-72ac-4b04-b149-3ae7d54b630b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.5, 0.748, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2272", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_96b8805f-69fa-420b-823e-29ff28e471f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.153, 0.773, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2273", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5e01100b-9202-4e0d-84f9-a986283066f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK\n[searchbox] City, State, or ZIP code -> TYPE: 10005"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.423, 0.62, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2274", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_2b8a282b-a072-4637-8dc2-bbba1e04c12b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.094, 0.745, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2275", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_6e941c23-f01a-484c-bfdb-cbbac7ea4727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.203, 0.473, 0.235] in the image\nAnd my action is Action: SELECT\nValue: RX"}]}, {"id": "mind2web_2276", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_58fa66a3-0966-4b45-bb59-6ae4cbac9f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK\n[button] 2023-04-05 -> CLICK\n[combobox] Desired reservation time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.703, 0.251, 0.909, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2277", "image": {"bytes": "", "path": "./images/51d2bb04-8207-4c21-8bc3-c00d826a5d0c_4bbd596c-f120-4a5f-ad59-c4fa1887b64e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Remove the SSD on my cart\nPrevious actions:\n[link] Shopping Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.358, 0.695, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2278", "image": {"bytes": "", "path": "./images/a747bed0-0f45-413a-8f48-2c45795e4e3d_a436dc7e-365b-4bc6-ad92-f1444c628f9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give a like to the #1 track of the Real Time Top Chart\nPrevious actions:\n[link] Charts -> CLICK\n[gridcell] Like Crazy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.127, 0.233, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2279", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_27521355-59c4-4bab-a47f-d1d98c5617c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Adventure -> CLICK\n[div] Narrow By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 0.79, 0.294, 0.806] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2280", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_281334a6-f548-4381-9965-fdac05c9b599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Phoenix -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.345, 0.881, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2281", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_6db2cc83-aa0e-4f3e-91de-71acfed5bdb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK\n[textbox] Enter First name, last name, and/or username: -> TYPE: Joe Bloggs\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.247, 0.736, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2282", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_a5e39e4d-8ef5-424e-9370-dc254fdbcb03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.019, 0.39, 0.06] in the image\nAnd my action is Action: TYPE\nValue: wireless keyboard mouse"}]}, {"id": "mind2web_2283", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_ed51f63d-6cab-4ecb-831a-81833977302f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.099, 0.454, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2284", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_d7379735-b499-442a-ab5b-b0b0d6e6d906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[combobox] Search Board Games... -> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.198, 0.312, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2285", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_2edb875d-df64-4128-9e2e-c8d147290aff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK\n[span] Walgreens -> CLICK\n[textbox] Add a title (Required) -> TYPE: Walgreens"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.698, 0.142, 0.787] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2286", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_b564668c-3c8e-4538-9bee-e1e48c71fa99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.381, 0.952, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2287", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_b3d17b95-f512-463c-8359-a1ed302829ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.347, 0.833, 0.387] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_2288", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_35217f76-ff90-428e-a78f-72c14b82dc4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.424, 0.302, 0.448, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2289", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_0db62d4c-e735-47bf-bd3f-f5a51ee7f6fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK\n[button] All Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.29, 0.393, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2290", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_961c3a5e-f8ce-4c71-a917-aa546dcea7fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.302, 0.292, 0.341] in the image\nAnd my action is Action: SELECT\nValue: Pickup"}]}, {"id": "mind2web_2291", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_90aba443-6a23-47d7-bd15-ccab225917fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.026, 0.614, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2292", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_40bef6c6-0468-4277-8a65-6b4fd6ef2c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.363, 0.619, 0.385] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2293", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_fa625908-78ca-4882-a85e-528a818b3a77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[option] New York City\u00a0\u00a0 City -> CLICK\n[span] Mar 9 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.244, 0.607, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2294", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_13275433-ad18-45f4-8742-60e8c549e96f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.158, 0.991, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2295", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_d59408ec-1909-4c52-9d24-9d21802048b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.224, 0.495, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2296", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_91a0d4fe-3524-448a-995f-8c4d570884ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK\n[link] Explore Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.577, 0.579, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2297", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_3fac526d-a878-4292-a372-861c97b8d5e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.01, 0.519, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2298", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_0944f5d6-5126-4eeb-a660-bb87994aeb13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.074, 0.552, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2299", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_745736e0-20b8-4366-bfa1-c023ed7c78df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2300", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_e49a338c-0d26-48ba-a268-5b8914da3639.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 
3 the movie.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Guardians of the Galaxy\n[tab] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.286, 0.677, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2301", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_1766f5b3-b6c8-489b-9848-636317358a9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK\n[heading] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.242, 0.564, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2302", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_3e587fb3-fd87-4df0-9a6c-74394cabc670.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.302, 0.587, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2303", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_29ade6af-9748-423d-8d26-30b16d0881a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.076, 0.474, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2304", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_3e80cc44-0172-49ab-b2b6-bf770c28f9e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.279, 0.486, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2305", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_2d9747a3-5834-4128-9975-1d676e3eff45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK\n[textbox] From / Flight number Autocomplete selection. Enter... 
-> TYPE: Abidjan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.124, 0.5, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2306", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_b0979f37-ab13-4dc6-b59c-6ff68a53d096.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... -> CLICK\n[link] Go to route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.284, 0.379, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2307", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_56612ecc-9966-4b43-bb15-24148c457635.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.363, 0.29, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2308", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_181a2bf7-0625-438b-85e7-5b0d10523e46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.207, 0.416, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2309", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_e1d01996-7299-4eb2-80e2-36f60c02f589.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.242, 0.371, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2310", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_cc690f3f-8b0c-4cea-8060-4fb8bb31a372.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... 
-> CLICK\n[button] 2:00 PM Eatery -> CLICK\n[button] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.277, 0.523, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2311", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_65eeb779-e67c-43e7-a846-e15f5adb0238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.022, 0.576, 0.032] in the image\nAnd my action is Action: TYPE\nValue: KCCR"}]}, {"id": "mind2web_2312", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c06ef8b8-57d2-49be-bdac-79839ef57e7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK\n[button] 20th March (Monday) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.447, 0.546, 0.5] in the image\nAnd my action is Action: SELECT\nValue: 8 00 PM"}]}, {"id": "mind2web_2313", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3650813e-c882-4d37-bbab-bed70f3b6dce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.524, 0.385, 0.547] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2314", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_cd2e2892-6319-4ee5-82a8-f4ea09e54de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.031, 0.176, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2315", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_da8122e0-c040-41c6-8481-3dcd54f56ac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] May 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.274, 0.831, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2316", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_9e375a31-474c-4e38-a58d-96d951a5e0d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.245, 0.249, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2317", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_2e754e3f-f3a2-4f55-9783-bc7ad866d622.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.466, 0.161, 0.556, 0.203] in the image\nAnd my action is Action: SELECT\nValue: 2 Adults"}]}, {"id": "mind2web_2318", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_75df0b6f-d448-4ab5-8039-d32f11ab3fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: sheffield\n[span] Sheffield -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.263, 0.194, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2319", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_ec710e69-c4be-4825-890c-7e865bcc443e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK\n[button] Sydney -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: pasta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.026, 0.657, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2320", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_4b19285c-34b9-4ee7-9e6e-a6a4e9f23b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Guides -> CLICK\n[link] Vehicle Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.343, 0.337, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2321", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_d7f294ef-efb7-4ee6-907f-6bdd6dca408f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Crossbows and Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.286, 0.103, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2322", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_aad02f8a-a965-4f56-ae56-baf426db1a3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.175, 0.77, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2323", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_b8aff40e-1281-4f10-b20d-829d0ac854c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500\n[combobox] State -> SELECT: New Jersey"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.447, 0.459, 0.485] in the image\nAnd my action is Action: SELECT\nValue: Challenged (< 580 FICO\u00ae Score)"}]}, {"id": "mind2web_2324", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_99d67dc4-ec1a-4417-a267-430411f4c20a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.431, 0.196, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2325", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_98661977-f720-456c-a165-9c8609d94b0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.142, 0.366, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2326", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_ff31502b-357d-4a19-b304-f831b6999618.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.326, 0.359, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2327", "image": {"bytes": "", "path": "./images/fce75183-0825-42b1-baf3-a9214fe20ce9_1673940e-dfb3-43b9-81f5-6e65ead88503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse recent NFL-related videos.\nPrevious actions:\n[link] National Football League NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.044, 0.729, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2328", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b856c4ca-c796-45be-9390-70f6957a0bc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.068, 0.67, 0.085] in the image\nAnd my action is Action: TYPE\nValue: Barclays Center"}]}, {"id": "mind2web_2329", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_62676939-ee3d-4810-b690-a00986baf799.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.426, 0.336, 0.453] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_2330", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_d5d3ece8-7439-42f0-82d0-31f0ae61e479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.007, 0.348, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2331", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_fde61447-6446-43dc-a3dc-63b8108c50e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.101, 0.63, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2332", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_8329edd0-0afb-4c85-8c1d-84666687cb56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK\n[span] Price by Core -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.213, 0.817, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2333", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_2853cb4e-f67f-493e-be7a-7361e69c3d7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.432, 0.355, 0.444] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_2334", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_29dcce31-0589-4080-9abe-16f7658e7693.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.121, 0.677, 0.141] in the image\nAnd my action is Action: TYPE\nValue: Cyberpunk 2077"}]}, {"id": "mind2web_2335", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_c5bebb8f-e171-4b9e-bc26-c49c4e876152.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.176, 0.363, 0.214] in the image\nAnd my action is Action: TYPE\nValue: sin"}]}, {"id": "mind2web_2336", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_b7062d2d-0889-4d33-9d6c-47e2e18faaf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[checkbox] Outdoor Seating -> CLICK\n[checkbox] Dogs Allowed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.46, 0.688, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2337", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_9200113b-0a04-4426-8bed-76f54a25cd86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.181, 0.915, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2338", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3f78437f-f79a-4dba-b4aa-310f0fb87f56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Start Journey -> SELECT: Train"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.318, 0.61, 0.338] in the image\nAnd my action is Action: SELECT\nValue: Bus"}]}, {"id": "mind2web_2339", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_3a180732-6454-48e1-b9f6-cdf9b5b339f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[label] Boots -> CLICK\n[label] Sale -> CLICK\n[div] Color -> CLICK\n[label] Black -> CLICK\n[div] Average Ratings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.179, 0.088, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2340", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_74581ca5-a492-41c8-89db-5f671285f014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.424, 0.557, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2341", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_d2931b9c-010c-4937-84c3-cbb43b1adec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.318, 0.259, 0.361] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2342", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_676972b3-6baa-442b-bb01-51684fb564af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: kinect camera\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.285, 0.179, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2343", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_9533f5d5-15e2-4474-9fc1-a25f829529a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK\n[textbox] Flight destination input -> TYPE: PARIS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.127, 0.84, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2344", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_c7e0e26c-67cc-4ef7-90b8-78c16829898e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.117, 0.362, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2345", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_b62199e4-1022-40dd-a88c-5dae5942658a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[tab] One-way -> CLICK\n[svg] -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.182, 0.573, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2346", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_712b6f48-ec3d-433b-9804-6663aa03c42c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. 
The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.522, 0.451, 0.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2347", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_6d14c32b-ee67-415e-80d0-045d489e0731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.591, 0.109, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2348", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_0866c57d-a360-4e2f-b879-f91e92979147.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.287, 0.706, 0.312] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_2349", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_8b949019-9211-4ed7-8748-cdd325e6ca6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.269, 0.085, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2350", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_2cb0354f-be23-454f-ade7-ab45bb1778f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.406, 0.235, 0.427] in the image\nAnd my action is Action: SELECT\nValue: 50 mi"}]}, {"id": "mind2web_2351", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_22c760c4-ab12-4ef2-ba74-9f42f6fab59a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK\n[link] UEFA CHAMPIONS LEAGUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.061, 0.604, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2352", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8ce27faa-f678-4a05-8029-1541ca7578a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.296, 0.347, 0.316] in the image\nAnd my action is Action: TYPE\nValue: 75"}]}, {"id": "mind2web_2353", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_30197f54-0965-4a7f-8a1f-526d0351cbca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.3, 0.881, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2354", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_ecb09ce1-7354-4d48-a022-e402dc19cc48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.17, 0.133, 0.185] in the image\nAnd my action is Action: SELECT\nValue: 12"}]}, {"id": "mind2web_2355", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_9d4fc01d-c792-471d-8fa9-dc5d5531aab3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.386, 0.29, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2356", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_5785c6c2-b69a-4770-be93-f0d6131e71fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.331, 0.352, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2357", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_2ce00d9e-383e-4b57-86f4-b2e5bea18060.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK\n[link] MULTI-RIDES & RAIL PASSES USA Rail passes, monthly... 
-> CLICK\n[img] -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.413, 0.737, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2358", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_8d25c01d-4501-4078-9cd8-f51b5498b1ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.153, 0.573, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2359", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_46bb7c15-23d8-4e39-872a-f5166565b18b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK\n[div] Jun -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.192, 0.171, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2360", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_bd7c2bc5-c2da-40ed-a815-11cd373099bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Choose your room -> CLICK\n[link] Choose Another Hotel -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.335, 0.308, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2361", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_0a04ab16-035e-4c33-9db9-abfe44095a57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[input] -> CLICK\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 0.229, 0.689, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2362", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_b8cb066b-326c-44a4-bb54-e12f4ba8f863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK\n[button] Open Navigation Drawer -> CLICK\n[span] Top 250 TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.691, 0.67, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2363", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_03fbc8e2-cd22-4438-99ce-6444f9cb06a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.428, 0.435, 0.569] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2364", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_abf1e30f-2fb3-47f7-9b39-c10d02703d4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.028, 0.728, 0.051] in the image\nAnd my action is Action: TYPE\nValue: las vegas raiders"}]}, {"id": "mind2web_2365", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_3de629ac-77af-43a3-b249-a76ed19aea42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.235, 0.609, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2366", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_15e52d78-d625-465e-b260-2fc9775b965b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.115, 0.085, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2367", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_718514fb-cc04-4b61-a21b-d9e159bd3e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.377, 0.984, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2368", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_b15f4f6f-413b-4ae7-bae8-1ca7e0b4d75a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.193, 0.514, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2369", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_8dfaa8ab-c597-47ab-a575-06c902f13b04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.005, 0.681, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2370", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_63a284bf-17b9-4a58-81c7-7545cc57a69f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.161, 0.372, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2371", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_cedbbc01-d62f-4fcc-9b2d-e44336dabc7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.204, 0.249, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2372", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_b4d4000c-1f63-49c4-9616-44eecec411f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK\n[button] Heartland -> CLICK\n[link] The page with details for The Heart of America wil... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.386, 0.686, 0.485, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2373", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_df3e9cc5-5632-4d45-b234-7994469d1625.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.008, 0.561, 0.035] in the image\nAnd my action is Action: TYPE\nValue: SOUTH AFRICAN HISTORY PODCAST"}]}, {"id": "mind2web_2374", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_63d785ce-1b1f-4f8a-ba50-8cf8ff40d73f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.364, 0.238, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2375", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_ad5a4145-122e-4236-bc44-b1efcc78caf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/23/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.42, 0.895, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2376", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_e9f82301-438f-4602-9a01-59d80d5bdae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK\n[link] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.237, 0.265, 0.257] in the image\nAnd my action is Action: SELECT\nValue: 17"}]}, {"id": "mind2web_2377", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_165bc7ab-0f5e-4633-acf5-588ddbef6ef8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.267, 0.634, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2378", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_efa5da67-cadc-4dc5-b66f-b1f73acbac75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK\n[checkbox] Honda (549) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.302, 0.277, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2379", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_a99be392-4e33-4fb6-9e75-0b6db4e3c636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK\n[span] Virginia -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.458, 0.137, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2380", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7d744575-cd07-4e36-9871-3feb82f857f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.12, 0.953, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2381", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_237a3344-5afb-40a4-90f8-ac59015288ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.554, 0.846, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2382", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_3a3ea0a2-ac4f-4852-9eef-06f64dcc0b45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.173, 0.142, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2383", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_9fb3d99c-0a3a-4d49-b2c8-223e33028333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests\n[button] March 30, 2023. Selected date. -> CLICK\n[button] 1:30 PM Dining Room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.463, 0.523, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2384", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_c8ae4adb-0aa7-406f-8732-7d52c7822725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK\n[button] 43235 -> CLICK\n[input] -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.286, 0.395, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2385", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_056c07ae-695a-4085-a246-972a75091afa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK\n[link] Financial Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.245, 0.867, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2386", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_8fce0cf2-ba98-47ce-945b-36fc51b17258.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.229, 0.294, 0.257] in the image\nAnd my action is Action: TYPE\nValue: all star stand up comedy"}]}, {"id": "mind2web_2387", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_f5c0b94f-00a9-48db-af5e-41b3312cced3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.234, 0.186, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2388", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_0bace322-b3ef-449a-a74a-d80e3a3f0994.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.072, 0.208, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2389", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_ef32e30f-b74b-49e3-87fe-bd6ec3dac346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.169, 0.359, 0.185] in the image\nAnd my action is Action: TYPE\nValue: Grand Central, NY"}]}, {"id": "mind2web_2390", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_81da20ab-ac0e-46a3-a331-7680d55ffb13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.482, 0.343, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2391", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_e5453e8d-5e53-4cbb-b9b5-9066cf3ff1e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.03, 0.361, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2392", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_e6fa804e-3e98-4f26-9433-3da3a3fa7bf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.942, 0.167, 0.977, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2393", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_03092a53-0508-4731-9c6c-27e82d5e74e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.075, 0.775, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2394", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_42dc7dd0-6b08-48bc-b31f-c62882e67b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2395", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_f7c7c715-3a1d-43f6-a391-7054a379dcd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.295, 0.269, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2396", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_e56f828d-45bb-4858-98cb-9c6ab5b55e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.308, 0.31, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2397", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_5e1ca919-5001-4d5f-83a3-e8b5f8270ccc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd\n[textbox] Email Address -> TYPE: abc@abc.com\n[checkbox] Solar Roof -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.89, 0.365, 0.914] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2398", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_ce4d06b6-cccb-471d-a105-368e76a1aa28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.02, 0.328, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2399", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_d53a314f-6ecd-4ad7-ae39-6ef936c2809a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[tab] Fastest -> CLICK\n[button] See flight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.559, 0.786, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2400", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_c521c553-ed10-4e26-af9a-6e28c2563b07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.368, 0.206, 0.383] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2401", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_880ea728-2568-4211-8078-f7a92a2802b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK\n[button] Add to cart\u2014$46.73 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.202, 0.494, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2402", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_cbfb435d-7c81-4de0-8bee-f5106b6b09e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.007, 0.393, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2403", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_1ca0bcae-dc05-4a2d-bfd0-0838c8284ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.551, 0.463, 0.578] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_2404", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_9900996a-927f-4aeb-9632-a97400207554.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.135, 0.487, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2405", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_657f7043-a245-412d-843f-b4cc104f8b22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.448, 0.096, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2406", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_64766e71-e258-4354-8bde-2a3a0b75014b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.132, 0.732, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2407", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_01450a81-82d1-4492-a961-c81534798a36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.012, 0.1, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2408", "image": {"bytes": "", "path": "./images/ab989f1e-bacb-4c37-ab81-c8f24ee3284b_7b892c42-4c6d-4adf-af9b-b77b7adf8681.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the special timetable for Far Rockaway Branch of the Long Island Railroad.\nPrevious actions:\n[link] Schedules -> CLICK\n[heading] Long Island Rail Road & Metro-North Railroad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.427, 0.5, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2409", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc480331-a947-421e-90ed-891f11e70239.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[button] Search -> CLICK\n[button] Recommended -> CLICK\n[div] Lowest Price -> CLICK\n[svg] -> CLICK\n[button] Choose your room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.801, 0.308, 0.826] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2410", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_50d097a8-12d9-488d-bb6e-a7c7a0f4a112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.298, 0.159, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2411", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_b3e1739e-0621-49d7-a6c6-fd3bcdd807c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2412", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_f8e87034-4dc4-4109-ba01-2b7b0347713f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.053, 0.173, 0.088] in the image\nAnd my action is Action: TYPE\nValue: Mumbai"}]}, {"id": "mind2web_2413", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_e02b279d-3e39-4465-81eb-d34ad716873d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.499, 0.088, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2414", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_a4034b18-98c0-4a92-8692-dd5255f8212e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[span] 02199 -> CLICK\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.31, 0.373, 0.336] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_2415", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_8949caa0-b7f1-48f7-9c16-6303d8e5139e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\n[button] Help -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.486, 0.061, 0.571, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2416", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_558e8da1-899c-4f41-804b-8979032f2849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.163, 0.113, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2417", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a60f2c73-7148-4798-a883-9b406aed93d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.315, 0.61, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2418", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_c201e18e-6089-4696-a09a-4c07559c3500.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.245, 0.34, 0.265] in the image\nAnd my action is Action: TYPE\nValue: 45201"}]}, {"id": "mind2web_2419", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_d787b502-0951-4e7d-8f76-7883935a9359.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.528, 0.335, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2420", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_f75324ed-ef88-4e36-9985-867b0955b5d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? 
-> TYPE: SHANGHAI\n[div] Shanghai, China -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.227, 0.471, 0.249] in the image\nAnd my action is Action: TYPE\nValue: SEOUL"}]}, {"id": "mind2web_2421", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_20482680-aa27-49bd-8b8e-310c1b22ece4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.336, 0.422, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2422", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_45f432bc-2147-4142-8762-ee4e46d23ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.191, 0.721, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2423", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_c8661052-8860-4fe0-b8aa-c95cd1ec01de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.403, 0.974, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2424", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_4bd6b427-d3d3-4918-a9d8-605b56eb6ba7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.307, 0.211, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2425", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_86d43239-affe-4c5e-bc33-0670285d687d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.826, 0.032, 0.84] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2426", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_abb32f30-a508-44b6-a76f-b96667518a71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.211, 0.29, 0.241] in the image\nAnd my action is Action: TYPE\nValue: bhz"}]}, {"id": "mind2web_2427", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_15b5ed14-0073-4fd8-ab5c-90d56475412c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.166, 0.522, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2428", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_069e32e5-39d1-4db3-88d6-167a919fd1c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.006, 0.995, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Atlantis"}]}, {"id": "mind2web_2429", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_073133f1-988d-4c73-b606-4934148a72ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.294, 0.371, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2430", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_df574cbc-d7ed-4bbe-bbc5-3e0694b79f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: The Flash"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.193, 0.657, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2431", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_7e49b87e-b08c-41d7-bde4-c9a4cadedc66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[button] Go! -> CLICK\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK\n[span] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.128, 0.616, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2432", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_efd16945-67fb-4e57-ac60-d699b278ddb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.127, 0.434, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2433", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_b58648d8-209f-43b2-aab1-5d3835e84d59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[div] 1 days -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.214, 0.264, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2434", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_0ef22904-d5ae-48f2-bdea-a4f32b28521a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.37, 0.759, 0.397] in the image\nAnd my action is Action: TYPE\nValue: X123456"}]}, {"id": "mind2web_2435", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_08841ec4-c606-4419-a0ae-ad5480b680ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.114, 0.734, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2436", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_62404140-be23-4dd0-838a-e9319f9c9381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK\n[textbox] Max Price -> TYPE: 200\n[textbox] Min Price -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.571, 0.976, 0.628] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2437", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_a390dcd6-7459-4945-bcfd-9a161018eda5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.147, 0.559, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2438", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_746fec9f-bf5f-49eb-a9b2-0c96916e881d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.193, 0.312, 0.209] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2439", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_3828f926-29a5-4b41-99ce-471f499356c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.046, 0.164, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2440", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_4ba0fa22-4b33-464a-ba78-506bbb581b2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Pizza\n[b] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.22, 0.562, 0.231] in the image\nAnd my action is Action: SELECT\nValue: 6 Guests"}]}, {"id": "mind2web_2441", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_b488a66d-0c85-42e4-8975-57b86557952d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[svg] -> CLICK\n[button] Choose your room -> CLICK\n[button] Book Business Double Room A -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.896, 0.302, 0.977, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2442", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_76dfc623-7691-4193-8b56-0c3e654a9511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... 
-> TYPE: 12345678"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.244, 0.499, 0.269] in the image\nAnd my action is Action: TYPE\nValue: Jason"}]}, {"id": "mind2web_2443", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_3b6a87ff-2811-4fa2-b5c8-e84a06e9231a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: SE4"}]}, {"id": "mind2web_2444", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_4eb19133-5c75-4700-b3fc-0c913c32a1b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.81, 0.492, 0.835] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_2445", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_9e27020b-4af3-474c-be31-2db12fbb98fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.202, 0.477, 0.213] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_2446", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_1d43db7c-3bb1-443d-9a59-9ddf96651271.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.14, 0.573, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2447", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_11dfd474-542d-4f33-b6c6-caf5d6b23e4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.229, 0.341, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2448", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_bcd7cb41-c53c-406b-bce1-125e51ce307e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.275, 0.279, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2449", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_974168a6-b792-4dd9-863e-a6e6a9127534.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. 
Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.293, 0.268, 0.322] in the image\nAnd my action is Action: TYPE\nValue: KATHMANDU"}]}, {"id": "mind2web_2450", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_1b0baac2-c9d2-4069-9290-d65c9bce964f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.295, 0.646, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2451", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_27abc650-25bc-4667-8e1d-cd1b4e7b42ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK\n[button] Get tickets -> CLICK\n[listbox] Select quantity: General Admission price: $60.00 -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.221, 0.573, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2452", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e76d75a5-5836-4b36-9260-e4877e687b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.235, 0.393, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2453", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_2dbedcb1-963b-4445-9a0a-bc32144984ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.01, 0.323, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2454", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_c36efecc-f544-470c-93bf-162cb1a83f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.013, 0.519, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2455", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_c42cca35-7a05-4bb6-8671-737bb9dc9812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.046, 0.429, 0.069] in the image\nAnd my action is Action: TYPE\nValue: LOS ANGELES"}]}, {"id": "mind2web_2456", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c20e44a2-0e7e-4e24-865e-91167602faee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.167, 0.178, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2457", "image": {"bytes": "", "path": "./images/30e310ca-af64-46b4-a0f6-14b8f04fa734_d5387d67-6e85-40ef-8c69-412c86d9cd11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up Popular Photos in the Community featuring Aegean Airlines\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.053, 0.742, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2458", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_9e4f6faa-1691-43ae-ad28-12414527bb85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.137, 1.0, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2459", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_fa7bc1d5-ff65-4a55-8ea6-c8154a05c7df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.352, 0.281, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2460", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_4745dde5-a1c1-4426-8a1a-1eff32a73563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.271, 0.459, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2461", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_797e693d-0b01-49e4-856c-74dc502eca54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.381, 0.609, 0.416] in the image\nAnd my action is Action: SELECT\nValue: 01"}]}, {"id": "mind2web_2462", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_ed7f3a80-da26-4473-8e9c-142ffcb114b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High\n[checkbox] 4+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.19, 0.914, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2463", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2a5cdf7a-e6ca-4b12-bc94-645a6360642f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.785, 0.627, 0.808] in the image\nAnd my action is Action: TYPE\nValue: Wolowitz"}]}, {"id": "mind2web_2464", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_1e1232ec-6ba6-4991-b3df-4acf8b58f80c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.34, 0.647, 0.657] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2465", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_0d4d9216-bd04-4cdf-9c48-81a60644bb42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000\n[input] -> CLICK\n[input] -> TYPE: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.34, 0.37, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2466", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_983d86c3-2498-4b43-8c31-a2e9549e0097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: Panini Diamonds Kings Baseball cards\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.26, 0.179, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2467", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_d1da37e1-babe-4725-9acf-6c1dbd955355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.274, 0.371, 0.303] in the image\nAnd my action is Action: TYPE\nValue: ac recharge"}]}, {"id": "mind2web_2468", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_864d3cfb-f813-4b7b-ad22-bcf37afeb3c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.216, 0.374, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2469", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_023f505d-630a-4860-be8d-bef956e29522.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.538, 0.418, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2470", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7b70a5e0-27fd-4522-ba80-8655d4cfe594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: CDG"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.175, 0.393, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2471", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_3ee72ba6-1685-4f66-8150-7ab99ac1d9de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.727, 0.31, 0.744] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2472", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_87fd7194-d9d7-433f-8ce3-af6697c92098.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER\n[button] Search -> CLICK\n[button] Filter -> CLICK\n[combobox] Location -> SELECT: Arizona"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.227, 0.512, 0.243] in the image\nAnd my action is Action: SELECT\nValue: Date - latest to soonest"}]}, {"id": "mind2web_2473", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ceb95a13-7820-495e-913d-8cff0a0494c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK\n[radio] Key extraction -> CLICK\n[radio] Vehicle -> CLICK\n[generic] 2 filters Key extraction \u2022 Vehicle Clear all Cance... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.138, 0.66, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2474", "image": {"bytes": "", "path": "./images/e031c695-28e2-4507-949e-bbb65edf9f3d_1108cb6a-19ec-4e09-935e-67b15d2f8830.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an article about a new restaurant and share it on Twitter\nPrevious actions:\n[link] New Openings -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.472, 0.056, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2475", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_bad8613e-08b4-4ae7-af27-263f36e2ff69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK\n[button] Adults-Only -> CLICK\n[button] Airfare Included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.313, 0.772, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2476", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_0a1122f8-7172-4300-985d-5abcb7750ca4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.342, 0.187, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2477", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_35d262d2-4f80-4480-9a29-7c095d95e029.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.425, 0.572, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2478", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_b53007a1-0221-4a80-88a1-ccc9575705d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.027, 0.917, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2479", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_76439ab2-9cbc-4b10-9a8a-10aa688d53aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.458, 0.39, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2480", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_3db9ae03-d8ec-410c-a8b1-c8436fb9194e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK\n[link] Outdoor -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.294, 0.078, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2481", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_f1269494-1cf9-4f7c-ab7b-43521cf53783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.294, 0.164, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2482", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_a1264e25-26bf-49e0-b1f1-9efe9e8a1adb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar\n[button] CONTINUE -> CLICK\n[radio] Daily summary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.418, 0.588, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2483", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_bf3377aa-05c8-44a6-a152-194d47239df9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK\n[link] Fastbreak Program -> CLICK\n[link] Join Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.736, 0.783, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2484", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_d6daa669-ddfa-48d3-90e7-2838239b4926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[button] View Cart. 
Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK\n[p] Choose 3-hour window -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.433, 0.754, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2485", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_627981ff-61e9-46da-ab13-3e011fe1a748.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.152, 0.664, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2486", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0fef1419-a98d-41fd-ad31-a96c7cfd4f4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.702, 0.284, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2487", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_b0dcb485-a80d-4014-8137-c2c7c9675b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.311, 0.299, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2488", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_905eff11-9e4f-40ec-8794-0aa4dbad687a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.137, 0.588, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2489", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_6a89e073-7ebe-4c7c-8623-e310034d7e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.256, 0.472, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2490", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_c0435cf8-b490-4f65-a376-0fc31e91ef2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.098, 0.981, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2491", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_5987f07d-d700-45ea-b55d-163cb8e28520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.21, 0.303, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2492", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_b8b485f5-fffa-457a-98c8-3e3721b953f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.19, 0.712, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2493", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_a2b2735c-c36d-4565-b31e-00371ed0717c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.088, 0.963, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2494", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_6c70afce-e87a-4d1b-8d7f-f99589b2b407.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.203, 0.568, 0.22] in the image\nAnd my action is Action: TYPE\nValue: Alfred"}]}, {"id": "mind2web_2495", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_902efeef-0e70-46fd-8f95-96df32535561.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.466, 0.285, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2496", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_64040e25-5374-49ac-bea1-3e0fbf44525b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.137, 0.109, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2497", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_2de322fb-c659-4be9-90bf-9c7010ba87e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK\n[radio] Minivans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.357, 0.821, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2498", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_cc11e618-5383-4745-a31e-9b971622ef02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK\n[textbox] Search Kroger... -> TYPE: bananas\n[span] bananas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.18, 0.969, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2499", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_db2eac70-945f-4c8f-aaac-b8ec140bc870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles\n[b] Los Angeles -> CLICK\n[button] Next -> CLICK\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.233, 0.122, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2500", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_e21752a7-a515-41b7-9ac0-e1cb6394a9ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.095, 0.549, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2501", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_331f8da6-9df1-4da3-bdf9-0a7b9f3a15d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.41, 0.824, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2502", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_2b13ecd2-3bf5-41ec-8dfe-063e95329550.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.626, 0.566, 0.638] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2503", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_6d323066-077a-4cce-884e-23a3f42ac7cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.055, 0.228, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2504", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_6c351b5e-0ace-4391-ae82-bd84884f79f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.446, 0.263, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2505", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_6f68971e-2d54-4d7a-bd20-dc93e6b5b1fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.266, 0.341, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2506", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_890d1f76-6792-4972-a0e5-8d1215c8fea3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.188, 0.341, 0.207] in the image\nAnd my action is Action: TYPE\nValue: GOA"}]}, {"id": "mind2web_2507", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_d50a905e-f895-4188-9ca6-63081d81b204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.165, 0.742, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2508", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_018e6be5-2f73-4aaa-8710-7dea55fb84ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK\n[textbox] Your Name * -> TYPE: James Smith\n[textbox] Email Address * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone Number * -> TYPE: 6157075521"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.268, 0.557, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2509", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_0efc9d65-98b0-46ce-9791-66f408e8cd1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Phuket\n[generic] Thailand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.179, 0.686, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2510", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_d980a252-0916-403f-8778-bc2e09948456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york yankees"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.23, 0.487, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2511", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_ab1f2f97-9c1e-4336-8c3f-a252a460eb1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.691, 0.879, 0.721] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2512", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_2099ed07-e8fd-4a2e-9004-0351a78a8e72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[link] Hip-hop & Rap -> CLICK\n[div] Play -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.052, 0.589, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2513", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_7338cf47-dda0-4a46-85bd-3d8d340b7f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.101, 0.757, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2514", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_9ac1fc21-dd17-467b-ad80-40db1092b18a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.18, 0.265, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2515", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_8ac9fd2a-bb62-4303-93e6-8a5c3276a367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.321, 0.567, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2516", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_fb2cfc47-01cf-4aed-96aa-7632b3d5e2e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.012, 0.496, 0.038] in the image\nAnd my action is Action: TYPE\nValue: Western Digital internal SSD 1 TB"}]}, {"id": "mind2web_2517", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_39381d41-f8cd-4298-a524-0412ae6ba389.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles \n[div] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.089, 0.398, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2518", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_8ea62d22-a406-479b-a65a-acb24a4adb33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: professional boxing\n[option] Professional Boxing -> CLICK\n[link] CES Boxing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.152, 0.941, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2519", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_6fe956a0-e058-4224-83a1-f19fd7d3f44c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.016, 0.62, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2520", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_f1682371-d43a-4a7f-8fff-491e2150b2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.51, 0.226, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2521", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_6e17b7d2-b893-403c-a122-e0256b285750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets & Passes \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.123, 0.477, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2522", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a3c07b8-54dd-4137-b462-bc030e3860d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.236, 0.93, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2523", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_0004f2a7-90d6-4f96-902a-b1d25d39a93d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.672, 0.95, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2524", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_cd3c2bb0-9c7e-4ac9-ad60-b26a34297217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.101, 0.777, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2525", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_b319eb24-8b9a-449b-9d38-9e9fc2ac0bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.163, 0.345, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2526", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_cfcf2ac3-e03c-4911-98d9-b75840eeddb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.263, 0.045, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2527", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_f2cae883-42b7-4d29-8f26-c4caf0e0b1d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] 2+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.106, 0.965, 0.127] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_2528", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_9ad5cd9f-cc85-44aa-bf91-8bc253839abc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[link] Search open jobs -> CLICK\n[input] -> TYPE: TX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.256, 0.884, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2529", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_e23eb900-deb5-4f4b-8941-625c60a5ea37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.213, 0.263, 0.234] in the image\nAnd my action is Action: TYPE\nValue: SHANGHAI"}]}, {"id": "mind2web_2530", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_c5abc24e-404c-49fc-905e-a250d8b1010f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.188, 0.568, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2531", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_d7ad69f6-d2a6-49eb-9b8e-d3dd23c57bbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. 
-> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.236, 0.5, 0.262] in the image\nAnd my action is Action: TYPE\nValue: accra"}]}, {"id": "mind2web_2532", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_9a73b2c1-062d-4f9b-9d4d-af9d4c0abc95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.608, 0.526, 0.654] in the image\nAnd my action is Action: SELECT\nValue: 10 guests"}]}, {"id": "mind2web_2533", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_3135abcd-a139-493b-8c7b-9321fa5acc73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.667, 0.158, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2534", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_1c4496af-1ba1-49a9-99f6-61f547787b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.298, 0.795, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2535", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a7f02e91-d2bc-4941-a731-ad039f3c4cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 08817\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.014, 0.189, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2536", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_cc798f2b-dcc5-486f-b9a8-98d352b378e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.177, 0.369, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2537", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_404c41ad-b28f-42fe-a465-64585cbd1cd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.23, 0.395, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2538", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_b1b300fb-d1f4-423c-ba32-4dbfeb8cada0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2539", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_cdadd9af-a0b0-47d9-8b2e-9b01d1ecf507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.178, 0.568, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2540", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_d5abb011-938b-47a2-965b-33584ed07f91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.185, 0.504, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2541", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_b7df6516-6050-426f-b729-a41885186422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.414, 0.277, 0.444] in the image\nAnd my action is Action: SELECT\nValue: 2018"}]}, {"id": "mind2web_2542", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_429d9db7-1a1c-4bb2-8b4b-09d1a8b862b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] FISHING REELS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.359, 0.178, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2543", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_2b005599-77bb-4e09-9eaa-3cb686343ee2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. 
-> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.141, 0.72, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2544", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_143b4db4-0c32-4579-9fe3-edc5b7cdf40d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.467, 0.271, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2545", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_aadbdc6d-3710-4fa9-a11b-6941a191a7b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.013, 0.39, 0.039] in the image\nAnd my action is Action: TYPE\nValue: projectors"}]}, {"id": "mind2web_2546", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_07e211f6-3f82-484e-8465-34c9b2f91f5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.06, 0.082, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2547", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_b5d844b7-0b88-4b88-9174-7ba4c6f5423d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.177, 0.257, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2548", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_f39fd0f5-1a72-4a43-8c03-6e9ce2d22de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2549", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_9c5ab548-979b-4b73-a0dc-144229a6a59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.221, 0.485, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2550", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_f3514a0d-7a41-4ecd-93df-f14a6fad29a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[button] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.706, 0.651, 0.759] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2551", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_b71031f3-1e82-4395-8e53-3b038b707899.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.01, 0.546, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2552", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_c60cbbc5-6586-48ee-b238-c2b3c0488113.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.236, 0.174, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2553", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_51154c4f-01db-42c1-8081-1c18d4786dea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Milano (Milan) -> CLICK\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.085, 0.166, 0.16, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2554", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_21f89d91-cd21-47c6-9155-084a3ff620aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK\n[label] Up to 1 hour -> CLICK\n[label] 1 to 4 hours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.457, 0.236, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2555", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_c5f99b92-14a8-475f-91a9-2350aeef1398.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.163, 0.46, 0.182] in the image\nAnd my action is Action: TYPE\nValue: brain"}]}, {"id": "mind2web_2556", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_0f806e67-309d-409d-8959-e24867e11888.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.058, 0.455, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2557", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_218890ad-5305-4f3b-b3dd-da31e5b40fbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.041, 0.282, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2558", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a6763793-cd95-45e2-8a89-7b39cd608221.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.21, 0.84, 0.237] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_2559", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_2ddef786-a576-4379-8ca1-136036060c78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.558, 0.562, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2560", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_790289e7-9b0e-4672-abeb-18703347e599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.201, 0.536, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2561", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_512499f3-8a7a-46a3-89c2-27ab80ecd283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.35, 0.341, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2562", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_2b1af735-6002-4a1b-a021-dd9d263f3e1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.054, 0.617, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2563", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_56f01aa9-cfc2-423a-9c5b-daecf15e17a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK\n[checkbox] Office location Office location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.238, 0.684, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2564", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_435a480b-10e4-45fb-b384-378735865d8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.011, 0.39, 0.034] in the image\nAnd my action is Action: TYPE\nValue: xbox wireless controller"}]}, {"id": "mind2web_2565", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_6c991c31-464a-4ba9-a214-c6f849212ea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.267, 0.154, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2566", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_b8d7b8d1-7a8f-49e5-93af-d3f99b95b647.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK\n[button] Follow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.161, 0.171, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2567", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_c7f740d0-cf88-49df-8733-a50c0383c393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.496, 0.193, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2568", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_14fbcad5-7316-455e-af71-4205fb2df872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.283, 0.089, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2569", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5aa0d023-f4c7-4939-b947-5dc59943b1c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.131, 0.506, 0.154] in the image\nAnd my action is Action: TYPE\nValue: Aruba"}]}, {"id": "mind2web_2570", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_4dfdbc97-9aa9-466a-ab54-17f52d97a814.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.062, 0.902, 0.088] in the image\nAnd my action is Action: TYPE\nValue: mexico"}]}, {"id": "mind2web_2571", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e0f11846-4a63-435d-a2c7-49d804e28e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.181, 0.652, 0.228] in the image\nAnd my action is Action: TYPE\nValue: heathrow"}]}, {"id": "mind2web_2572", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_110c514a-0c12-4e5f-8a6f-68ea4fa545d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.139, 0.393, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2573", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_c266e30f-94b5-4161-a61d-b00f033b1e7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.172, 0.997, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2574", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_1fb73b24-199a-4f34-9077-52fc82e584fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.302, 0.447, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2575", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_c249afb0-9d76-4cf3-bc7c-8dd58876ce45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.085, 0.628, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2576", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_e9c76458-26a1-4095-8726-6f6a158f1e25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Minimize my -> SELECT: Walking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.255, 0.848, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2577", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_a702de86-38a4-4567-9959-b6515a416862.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.308, 0.326, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2578", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_a0a21e4c-4d0b-43da-9605-49c7968f34d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.134, 0.277, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2579", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_00cf6ce0-d213-4a2c-bd82-a17d21179d40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.22, 0.312, 0.24] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2580", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_e7a69198-f985-4899-b721-e53fc38e8dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.255, 0.963, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2581", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1036b5f0-eb6a-4ea1-b0f7-ed1c0e37abae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.258, 0.481, 0.292] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_2582", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_602a35f1-1a78-4137-8444-16379c1aa2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.007, 0.492, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2583", "image": {"bytes": "", "path": "./images/12324b80-88bd-4d48-962c-ae80203e5058_41631711-b251-4fe0-9b5f-0b86f4b58466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find $1 movie night film and rent blockers\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.106, 0.468, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2584", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f273ee73-79f4-4536-aaf9-db2ccf3d8e1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Set My Store -> CLICK\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.248, 0.253, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2585", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_83318fc2-ad80-4bda-8a6d-1be341afe2a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.366, 0.29, 0.383] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_2586", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_228b0634-9b76-4570-b428-fafc3b439443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400\n[textbox] Maximum Value in $ -> TYPE: 500\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.182, 0.027, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2587", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b88d7456-bced-43d2-886c-48cff487fdba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> TYPE: 12345678\n[combobox] Date -> SELECT: Friday, April 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.183, 0.875, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2588", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_9b765e22-bd76-461a-abf0-47558fa3de83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.263, 0.539, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2589", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_6004dfae-b262-479a-8e78-8ba5fbe68470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.174, 0.463, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2590", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_3827733e-5426-4c24-b369-ebf496245627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.111, 0.277, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2591", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_03d09aee-1faa-4853-a0b0-d989d64b8c36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.856, 0.112, 0.974, 0.135] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_2592", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_5e9cd272-fa52-47fd-826c-8c5a2ebd93e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM\n[combobox] End Time -> SELECT: 8:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.317, 0.484, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2593", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_55d58f35-4297-41a6-a078-363060e92b32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.091, 0.127, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2594", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_99817004-e146-4ae0-91fe-42055681c14f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER\n[link] POINTS SHOP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.435, 0.141, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2595", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_aac48f29-af47-4b76-9b6a-d3eb828b87dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.473, 0.894, 0.538] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2596", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_4607b007-3775-44ff-8b39-d20807e3572e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.068, 0.327, 0.09] in the image\nAnd my action is Action: TYPE\nValue: PARIS"}]}, {"id": "mind2web_2597", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_ef03f2ae-a21c-44a6-b180-a23414d36bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK\n[button] Stats -> CLICK\n[link] Season Leaders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.259, 0.174, 0.274] in the image\nAnd my action is Action: SELECT\nValue: 2020-21 Regular Season"}]}, {"id": "mind2web_2598", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_d74a9097-01f7-44a1-b1bc-6097432e6ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. 
-> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.237, 0.962, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2599", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0eebe04e-32f9-4329-95c2-12ba3c6b59d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.447, 0.793, 0.467] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_2600", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_1b1282e4-21fd-4dee-8bd1-d6e3b5e60e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.251, 0.363, 0.277] in the image\nAnd my action is Action: TYPE\nValue: Timesqure New York"}]}, {"id": "mind2web_2601", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_339ca2c3-dd91-42f7-bbe7-f6d60bff35ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.186, 0.138, 0.224, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2602", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9440b99d-8aea-4482-9e40-7df8f1a3844b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.633, 0.27, 0.65] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2603", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_af642869-dd83-4561-92f1-f004a419fc6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK\n[button] Member Rate Prepay Non-refundable -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.667, 0.96, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2604", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_532ea23b-b6fa-4d12-a857-8c60674dd2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.372, 0.407, 0.409] in the image\nAnd my action is Action: SELECT\nValue: Compass"}]}, {"id": "mind2web_2605", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_d0df170f-377c-437c-83e8-4519a6387c77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.175, 0.079, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2606", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_35e66ac1-e253-4232-849d-9b68d27b76b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.532, 0.7, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2607", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_8abb0cae-8fa5-4de2-9d2c-2b1f1476a3ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.125, 0.347, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2608", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_ab7e03f5-52b6-4226-aa07-97b50ddf55cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.122, 0.216, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2609", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_5e7a77e3-c722-4be5-9dd8-394a7d3ef942.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: Lubbock, Texas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.205, 0.871, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2610", "image": {"bytes": "", "path": "./images/69065697-a426-4e4c-80f7-82bf592b268c_87c4ceaa-e61e-4250-aa05-6deb28fe18db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find details on converting points to miles.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.404, 0.131, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2611", "image": {"bytes": "", "path": "./images/52a8bace-f14c-41ce-980f-50d95e5ac259_bc681a63-23ff-493c-9959-e1b8e93a7aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of countries with the highest number of seen aircrafts.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.067, 0.427, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2612", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c2d1eb13-7383-4947-af51-5a8233988ca8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK\n[button] Jul 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.135, 0.571, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2613", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_7afdfbab-b581-4794-8584-185fa115bbba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.139, 0.78, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2614", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_43d69022-fad5-4117-b3a0-98489b7889a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2615", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_96e23488-6063-4efe-9b16-86d2e304cbc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK\n[textbox] First Name (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Buck"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.24, 0.833, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2616", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_af1b2580-5ce6-48af-a29f-8d9152414487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK\n[input] -> TYPE: 50\n[input] -> TYPE: 100\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.927, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2617", "image": {"bytes": "", "path": "./images/f9e88baa-a109-454b-839f-1ab0746a5f13_ec0001ec-792f-4e40-aa10-63b1286ebefc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all watch options of Avatar: The Way of Water and where it's available.\nPrevious actions:\n[textbox] Search TV Shows and Movies... 
-> TYPE: Avatar The Way of Water\n[div] Avatar: The Way of Water -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.295, 0.632, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2618", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_b991d354-d2d4-409a-9e22-98f3bc4c8ddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.056, 0.869, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2619", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_f5bb5237-3617-4177-856e-81c617d0acfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.271, 0.686, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2620", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_495a67cf-f571-4d50-ae2f-f2f2b6274b27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.433, 0.32, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2621", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_e6f0de37-a72b-4b57-94c8-6d65e77a025d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.174, 0.529, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2622", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8e4a80cb-3d65-4a00-9649-1985306aa50c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... 
-> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.12, 0.893, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2623", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_afec19b2-2c9e-4a02-b24f-00932ef73c44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK\n[input] -> CLICK\n[option] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.478, 0.906, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2624", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_8729dbe9-778d-4dc9-a7bc-3f3a6f0125dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK\n[searchbox] Refine Location -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.177, 0.12, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2625", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_3c20ee92-54ff-4e67-9882-d6a25db69802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.757, 0.107, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2626", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_dba7625b-d345-4017-a7cc-2381cc5b5348.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.309, 0.687, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2627", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_61b7da8b-1015-40c8-8a7a-7fe00288aacb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.084, 0.266, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2628", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_853e26f1-d8f8-4821-b800-f3357b988e5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.778, 0.286, 0.806, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2629", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_13a3d40d-9eaf-431a-929e-17a081ca2a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.336, 0.105, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2630", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_3622c0d9-2623-4c45-a5a0-cb7dacecec7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.152, 0.5, 0.175] in the image\nAnd my action is Action: TYPE\nValue: Roanoke"}]}, {"id": "mind2web_2631", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_ca58d7fd-9205-48c6-960e-83307f6d843c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.626, 0.977, 0.652] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2632", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_0568946d-5e24-4ab6-aaca-f448308ff253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK\n[span] Price per person -> CLICK\n[p] Cheapest first -> CLICK\n[div] Economy Light -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.677, 0.796, 0.703] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2633", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_4b7632ad-468d-42f3-933b-c11f40d27ded.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.162, 0.559, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2634", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_25de61d4-f92d-455f-8905-cbb26e30395b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM\n[div] Search -> CLICK\n[div] Premium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.753, 0.188, 0.927, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2635", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_a8729521-544c-4677-bbeb-2aebc43bf83d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.184, 0.93, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2636", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_73a605e7-8819-41bb-8cfe-73fb22979a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.558, 0.273, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2637", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_0f568b3c-9312-4f45-a919-af0b1d2e7d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK\n[button] Go! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.473, 0.046, 0.619, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2638", "image": {"bytes": "", "path": "./images/6f1fe14d-543a-43c6-964a-0c74f6d86091_09e4b8ca-0a6e-4236-80ad-0662b8b16205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me movies produced by Aaron Horvath.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Aaron Horvath"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.024, 0.657, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2639", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_d67c1e8e-be13-4094-9d39-bb0daffc2f14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Guardians of the Galaxy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.093, 0.463, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2640", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_a4c16da0-0706-4d0a-a259-eb7657bbbbc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.052, 0.553, 0.093] in the image\nAnd my action is Action: TYPE\nValue: wall art"}]}, {"id": "mind2web_2641", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_abc09fd8-c93f-4e0a-a150-52b8aa5a03f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.47, 0.297, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2642", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ebb0133a-07b3-47ba-957c-3e48838a2827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.42, 0.834, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2643", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_f1a4664b-00c9-4016-8c61-d86520080cc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.051, 0.44, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2644", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_341a74e7-e3bc-49bd-8c12-ff4d7c51fc02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.282, 0.473, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2645", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_3707f7d8-e3dc-4f98-965d-5b7cbc562c31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.66, 0.08, 0.668] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2646", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_8be97cad-f129-4f15-bdc1-5d22eb161c88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.203, 0.593, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2647", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_50b5e01f-dd2c-4329-b782-a44c27b2326f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.435, 0.459, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2648", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_c398aaf8-6107-421e-a9b4-8c7518e18c46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.442, 0.648, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2649", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_cb3687da-0349-4e99-a3e4-e8d30f34901a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.504, 0.526, 0.551] in the image\nAnd my action is Action: SELECT\nValue: 8 15 PM"}]}, {"id": "mind2web_2650", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_c00eecee-db09-45ba-935f-9db580215fc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.046, 0.361, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2651", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_da2291c0-9f33-4a50-ba54-cb1a4a4ec265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.782, 0.277, 0.804, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2652", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_f487d5af-079d-4256-aea8-c423f788c7b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK\n[gridcell] Tue Apr 18 2023 -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.141, 0.484, 0.168] in the image\nAnd my action is Action: SELECT\nValue: Shuttle Time"}]}, {"id": "mind2web_2653", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_590646aa-5dd0-47b6-9181-4cadfe0cf58e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.284, 0.622, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2654", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_1b07bed7-815f-4c71-8b77-0f9abc587b36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.181, 0.257, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2655", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_ed897c6d-603a-4159-9a7e-9b397bf2e289.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. 
The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.53, 0.355, 0.546] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_2656", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_bcfbffee-6953-464a-8489-5fe5b67dc723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.552, 0.296, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2657", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_b3f45973-641c-4e50-bca1-519fcd6f135d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.335, 0.233, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2658", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_54c8ce05-463a-4151-9ae1-6b09bb09a183.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.024, 0.564, 0.039] in the image\nAnd my action is Action: TYPE\nValue: 94115"}]}, {"id": "mind2web_2659", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_f406f093-4ec0-4056-beed-b6f59270656d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.127, 0.546, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2660", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_3d867619-5934-4379-a470-a5f78405c6c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.185, 0.175, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2661", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_f219afff-4fbc-4b22-843b-347a60a6896b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.015, 0.183, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2662", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_b75d1f3b-2376-4441-b2d6-624fd7a5e15f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.353, 0.309, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2663", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_bf997fb5-69db-4c87-9ebe-fba3ab9f26c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Style -> CLICK\n[checkbox] Family -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Air-conditioned -> CLICK\n[div] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.087, 0.765, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2664", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_6c2920e2-8409-41f9-acb1-4749cde8de5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.143, 0.406, 0.173] in the image\nAnd my action is Action: TYPE\nValue: pedicure salon"}]}, {"id": "mind2web_2665", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_ce745d88-3511-43d3-9e02-401be37eca9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.006, 0.651, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Google Play"}]}, {"id": "mind2web_2666", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_d17036de-b354-42ee-b6a4-9b0cbc5d44fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.334, 0.56, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2667", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_cf0f773a-5040-453e-91ae-e7416a2e470a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.019, 0.456, 0.043] in the image\nAnd my action is Action: TYPE\nValue: vegetarian"}]}, {"id": "mind2web_2668", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_62df7775-357c-4748-b3ad-6d521606cb9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living History event to attend in in April .\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2669", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_282c09d0-c9e0-4007-b88a-27887fe1e388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK\n[button] APPLY -> CLICK\n[combobox] Featured Items \uf0d7 -> SELECT: Highest Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.345, 0.33, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2670", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_df163dcc-4779-4f0b-ad7e-ad149da8f2de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.187, 0.259, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2671", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_226fcc40-31c7-4c76-8934-4c6294ae162d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: national university of singapore\n[button] National University of Singapore Singapore, Singap... -> CLICK\n[button] Check-in April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.223, 0.473, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2672", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_d44f6051-c9f4-462a-a897-ffa2e2d4ef62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. 
Book a double room and check out with the default flights.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.034, 0.343, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2673", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_a918c8d9-504e-4c11-b878-34e2b00a3cf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[combobox] To -> TYPE: new orleans\n[button] New Orleans, LA, US (MSY) -> CLICK\n[textbox] Depart date -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK\n[combobox] CabinTravelers with Economy (first checked bag cha... -> SELECT: Business or First"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.326, 0.238, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2674", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_55d981df-d9d9-4428-998c-76ae31d88d41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.137, 0.848, 0.175] in the image\nAnd my action is Action: SELECT\nValue: Walking"}]}, {"id": "mind2web_2675", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5d28ead0-d4de-4f2d-9f18-759cd87611ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.232, 0.287, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2676", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_fc698c26-502b-442b-8790-0538d09406bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Sweaters & Cardigans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.657, 0.986, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2677", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_f026582a-747c-4f3e-86bd-c219d7425d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.46, 0.324, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2678", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_cb2a47dd-1c83-45d4-9186-65d56dd7ca78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.352, 0.693, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2679", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_e79a792b-7b25-460a-a0cd-ec532fb4a26e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.332, 0.754, 0.364] in the image\nAnd my action is Action: TYPE\nValue: Venice Beach"}]}, {"id": "mind2web_2680", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_40062ba5-e84e-4672-adca-053020ff758e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.006, 0.561, 0.024] in the image\nAnd my action is Action: TYPE\nValue: Ricky Kej"}]}, {"id": "mind2web_2681", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_d25f5393-a999-4987-910e-9397e8e29ab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.189, 0.354, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2682", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_75b9ad08-ee27-423c-8cb0-0605c7531495.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.032, 0.712, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2683", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a950a8a1-9cd6-423c-8bb6-8411564ed498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: CDG"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.418, 0.595, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2684", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_a3033f2b-83bd-41b1-9972-66f3135bd083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.318, 0.23, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2685", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0ac88699-988e-4a0d-b0bd-1e73d8eb268e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[combobox] State -> TYPE: new york\n[textbox] Card number -> TYPE: 1234\n[combobox] Card type -> SELECT: MasterCard\n[combobox] Month -> SELECT: 01\n[combobox] Year -> SELECT: 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.465, 0.609, 0.498] in the image\nAnd my action is Action: TYPE\nValue: 123"}]}, {"id": "mind2web_2686", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_40ef5585-94d4-4cf2-97ed-691180d5b6ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.016, 0.428, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2687", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_330af54f-7d87-4d91-a5a8-f393a0f6e0e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK\n[textbox] License Plate -> TYPE: AZXA46"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.248, 0.565, 0.268] in the image\nAnd my action is Action: SELECT\nValue: AZ"}]}, {"id": "mind2web_2688", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_6c9ca4fe-cd29-4b39-ad58-b099603ccc63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.717, 0.588, 0.724] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2689", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_6c575a9e-0f6b-417e-855f-3e998fa406cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors\n[textarea] -> TYPE: To Watch"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.437, 0.609, 0.46] in the image\nAnd my action is Action: SELECT\nValue: People"}]}, {"id": "mind2web_2690", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f4bbc6e3-9922-4100-b3cd-cf0322e739b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK\n[checkbox] Jeep \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.678, 0.408, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2691", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_b165ce6a-330e-4979-8733-1d329d59b870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK\n[div] Dual Motor All-Wheel Drive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.293, 0.748, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2692", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_d89fe4e6-31ff-4e6f-97a6-498dfeac0525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.05, 0.326, 0.081] in the image\nAnd my action is Action: TYPE\nValue: music"}]}, {"id": "mind2web_2693", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_27502e8e-1ee0-49f3-a0ed-60b044dd585c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.09, 0.727, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2694", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_6ec122a9-3a93-4787-abb1-da425a910bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.896, 0.116, 0.909, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2695", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d6f41e4c-9843-4db8-a803-4844920ce2cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.468, 0.699, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2696", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_7cccd532-c34e-487b-9a2b-c0a0f96305b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.177, 0.84, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2697", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_1a6370b9-054e-468e-8385-b363be981b1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.257, 0.349, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2698", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_bfb91ba3-f56d-4ddf-893f-0742d11e5d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.123, 0.868, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2699", "image": {"bytes": "", "path": "./images/21f5aaaa-d54e-42a6-91c0-d1a427533963_93323461-5177-468a-b61a-e0248520605a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the new arrivals on Easter home decor\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: easter home decor\n[span] easter home decor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.252, 0.986, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2700", "image": {"bytes": "", "path": "./images/3be7acd4-c8c4-4e0d-b2b6-f82a4e17de8c_f321c6df-c46a-4b3c-85f6-255e8db91d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the Resident evil 4 game guide.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.16, 0.079, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2701", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_8ae8c40d-2a52-496f-ac1b-a012fdf01d3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.289, 0.522, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2702", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_513bf92e-6c28-43d9-9fb0-0d858631436c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.393, 0.709, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2703", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_dec7212a-0ef8-4cbe-86b6-1aa9f3ec293e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.037] in the image\nAnd my action is Action: TYPE\nValue: Ariana Grande"}]}, {"id": "mind2web_2704", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_51845b7f-fb7b-4bc3-9c13-2c0a2afb5e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK\n[link] Order Now -> CLICK\n[button] Help Me Choose Solar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.448, 0.838, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2705", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_0aa34099-a83e-4931-897f-1f1b7e0f7e16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.507, 0.192, 0.524] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_2706", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_91afaca3-3df5-479e-aa43-1717da3b664c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... -> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.312, 0.352, 0.33] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_2707", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_4ddee31b-ec7d-496c-a1bb-92e16c3306ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.371, 0.192, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2708", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_e8eeebb3-8539-4adf-830b-b6bfeaa8a609.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.608, 0.296, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2709", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_41d4cbcd-ea80-4132-a2c9-b4da47c6a95f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.416, 0.263, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2710", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_afa1433c-8d13-4e0d-9b05-e0d7299da884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2711", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_51e9982e-0a95-4525-af85-fba89b577a34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Highest Rated -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.477, 0.098, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2712", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_776e07ac-4fec-47f9-8642-b4aa8dfe359e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.6, 0.263, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2713", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_4b8526ca-6237-4769-b1de-06e1097f8783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.317, 0.474, 0.347] in the image\nAnd my action is Action: TYPE\nValue: grand central"}]}, {"id": "mind2web_2714", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f0038f74-a616-44cf-b13a-29111280ae8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.246, 0.436, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2715", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_4077127f-bf53-43c9-8fff-96ffb9ecb611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.323, 0.237, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2716", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_bdf6ba83-c40c-4f99-89e1-56131fab37b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.176, 0.539, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2717", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_9c7a03fc-35cc-4769-869b-469e1363dca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.09, 0.478, 0.112] in the image\nAnd my action is Action: TYPE\nValue: kashi vishwanath temple"}]}, {"id": "mind2web_2718", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_55ba9bda-3735-48f8-8ce5-bdb904725fe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.739, 0.2, 0.753] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2719", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_a77a3a7f-cc1d-447b-903a-d09588b8a89c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.131, 0.975, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2720", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_4bb48e3e-2a32-4135-b436-33621055fc36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] Any Quantity -> CLICK\n[label] 4 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.333, 0.652, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2721", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_1f4c17d0-a075-4249-8621-8b8366006cca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.018, 0.418, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2722", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_dff98ff2-9f90-4274-abe1-de38cb0767d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.212, 0.185, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2723", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_d683e390-ec8a-47db-8772-cb52166ae30d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $14.45/Day$13.95/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.621, 0.777, 0.657] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2724", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_a2e11919-3d09-4a0d-bcb1-521927016889.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.022, 0.335, 0.036] in the image\nAnd my action is Action: TYPE\nValue: McDonalds"}]}, {"id": "mind2web_2725", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_59920c95-802a-4e3d-b08b-4807653406d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Month -> CLICK\n[div] Add guests -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.108, 0.793, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2726", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_ea3e7300-7769-45d1-b36e-1958830a8e3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK\n[link] Developer/ Senior Developer, IT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.249, 0.349, 0.277] in the image\nAnd my action is Action: TYPE\nValue: 8"}]}, {"id": "mind2web_2727", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_e99bb9c7-aeec-4826-b8f8-407c00622c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.227, 0.245, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2728", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_4abed667-f469-420b-9820-825cdd9e9b91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.133, 0.623, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2729", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_e834b996-182e-4755-bc68-504eb48496ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.22, 0.715, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2730", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_3f0533c7-e459-49b9-801c-2da4e0a3b04d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK\n[button] Today, Tue Apr 11 -> CLICK\n[span] Tomorrow, Wed Apr 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.272, 0.578, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2731", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_d53a6991-303f-429e-a864-ef07350fe423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.316, 0.161, 0.345] in the image\nAnd my action is Action: SELECT\nValue: Daytime Only Parking"}]}, {"id": "mind2web_2732", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_b76c6f62-43c7-465e-98ff-39332220d881.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.069, 0.094, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2733", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_f23cef2f-fa8c-4eb9-b7a4-4ece33aacea0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK\n[button] No thanks -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 0.702, 0.953, 0.749] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2734", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_39fe4719-f218-4d71-aab6-f8e6a4082dad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.097, 0.949, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2735", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_4f88ffbf-3206-47b4-84d2-d849707ed499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 10013"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.818, 0.366, 0.831] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2736", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_ee1e8ed0-43c7-4576-941e-61bb00b10218.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK\n[button] Search flights -> CLICK\n[span] Refundable fares -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.191, 0.458, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2737", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_eea67014-a3f9-41e0-8b0e-e2ca7dd69079.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.0, 0.44, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2738", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_adb3f628-dbad-4824-8d8b-c53ac8161b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.031, 0.602, 0.049] in the image\nAnd my action is Action: TYPE\nValue: Camarillo"}]}, {"id": "mind2web_2739", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_888c5867-268f-4edc-a635-9bc336f1fef5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.049, 0.327, 0.064] in the image\nAnd my action is Action: TYPE\nValue: Edinburgh"}]}, {"id": "mind2web_2740", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_0e2f5a42-2c2b-4554-9ec1-5e5a78e2f12c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.163, 0.664, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2741", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_48956859-b850-47a4-a0a2-7c0bdc12231f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.342, 0.394, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2742", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_c72c3d18-5a88-42c8-8c16-9294a6019000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> CLICK\n[searchbox] From -> TYPE: empire state building"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.274, 0.359, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2743", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3f010014-c3fd-457c-a17d-1ab30ce9a333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.251, 0.958, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2744", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_7ac03130-a5e7-41b0-a3bc-01c1cb99f1da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\n[tab] Alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.166, 0.392, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2745", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_e0931dce-0e84-42f5-91eb-b97b8d727e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... -> TYPE: gorillaz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.113, 0.489, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2746", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_4c77cf02-5428-4f24-86c3-dd73dee21f63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.01, 0.546, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2747", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_ee0ac3c1-985e-43a9-915e-f489f926b0a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.342, 0.651, 0.374] in the image\nAnd my action is Action: TYPE\nValue: 1648926800"}]}, {"id": "mind2web_2748", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_7232efcc-03a5-4cf6-abca-52d962651164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.186, 0.294, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2749", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ddadf800-5a52-419b-a717-ac5acbec55d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.268, 0.952, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2750", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_7baddb2c-fc8d-44cf-be47-7d265a0c1d8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.071, 0.697, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2751", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_450227c9-df44-4ea7-a169-6997823c8105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.272, 0.153, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2752", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9137881e-849f-4da9-bf17-076132e3b61d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.294, 0.28, 0.324] in the image\nAnd my action is Action: TYPE\nValue: colombo"}]}, {"id": "mind2web_2753", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_44d4df9f-e984-403f-aefb-96169d606b23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.309, 0.096, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2754", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_ecd96a58-af68-400d-bac5-e637de08d916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.296, 0.463, 0.306] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_2755", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_18636549-3ec4-44ab-9778-216113946411.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.007, 0.789, 0.03] in the image\nAnd my action is Action: TYPE\nValue: Seattle, WA"}]}, {"id": "mind2web_2756", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_0310c46b-bca5-4bd5-b568-7af5cce54b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.386, 0.223, 0.473, 0.236] in the image\nAnd my action is Action: TYPE\nValue: AZXA46"}]}, {"id": "mind2web_2757", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_3bc7e106-2d01-485b-bf0d-a1f32cca0604.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.624, 0.52, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2758", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_7911af81-53d1-402e-acf4-53625f86f726.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.35, 0.711, 0.383] in the image\nAnd my action is Action: TYPE\nValue: 234567890"}]}, {"id": "mind2web_2759", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_26a9327f-ce5d-41b5-b34f-e87ee369fe33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.044, 0.917, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2760", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_163ac6b4-cfa8-4e29-8a90-0e0b9ed3c8c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK\n[link] $25 to $50 (2,237) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.392, 0.974, 0.426] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2761", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_d4753161-a0e6-48a8-bd37-6dacdc712fa9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Paperback -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.413, 0.428, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2762", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_8ee0dcc3-5cfd-49ec-9324-4e578df23877.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.237, 0.575, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2763", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_bd9a4601-4e54-41e7-ba10-2c10b0d6f156.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.621, 0.783, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2764", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_dd9f6105-c072-46c9-b958-1a67631c68b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.072, 0.292, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2765", "image": {"bytes": "", "path": "./images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_05b31466-ca97-48e8-a8b5-d7be869d2c7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow the Denver Nuggets NBA team.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.063, 0.335, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2766", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_33c7a999-38ef-4589-8279-fdf8c2302c63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[heading] SOCCER -> CLICK\n[a] FEATURED MATCHES -> CLICK\n[link] ENGLISH PREMIER LEAGUE -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.309, 0.454, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2767", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_c5d08604-1632-41cd-89c7-39dbdcb8a353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.261, 0.485, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2768", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_b315afc2-75f7-4067-a09b-2a8b3b31c8b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.249, 0.96, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2769", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_efa0a116-02af-4a54-a426-72d5b7f09ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.16, 0.376, 0.177] in the image\nAnd my action is Action: TYPE\nValue: Carla"}]}, {"id": "mind2web_2770", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_f1fcb650-e85a-459c-a24f-1140130da6b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.004, 0.651, 0.02] in the image\nAnd my action is Action: TYPE\nValue: Sam Harris"}]}, {"id": "mind2web_2771", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_302070c4-a7ae-4fc4-957d-f31444de6ed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.151, 0.594, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2772", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_2046ff41-f18c-401b-b0b6-acb8c47d4752.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... 
-> TYPE: 45201"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.22, 0.514, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2773", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_a78fc3b8-5fae-4252-baf5-97f41c62fb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.271, 0.393, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2774", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_1745b057-153d-4830-9fc0-a0dd6789d5bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.2, 0.903, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2775", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_dd90c01f-05ab-4d6e-bfa5-3e9a3c00d161.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.67, 0.089, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2776", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_adf176ec-c852-40c8-842f-2c4133f8aa43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: Pizza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.101, 0.244, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2777", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_6544d313-8e7a-42e9-a996-497789511924.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.189, 0.873, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2778", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_831a73c6-155d-4ce3-b1f4-03b69243735f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.3, 0.187, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2779", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_4f967b7a-9ed5-4a01-ac5a-4bfb8c5cf276.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK\n[combobox] Select Make -> SELECT: Lexus\n[combobox] Select Model -> SELECT: RX\n[textbox] Zip -> TYPE: 90012"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.119, 0.748, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2780", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e0f6b19c-2da6-4552-944e-8e375cf719be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.205, 0.849, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2781", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_19891f32-74d8-4b7e-9529-b6ad116c7002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK\n[link] All Vehicles (13) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.199, 0.929, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2782", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0d7ecbf2-34f6-44ad-8a78-48c3102e5df2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: 5456165184"}]}, {"id": "mind2web_2783", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_88177b5d-5f76-4638-84cf-a9abf0abec85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.11, 0.641, 0.128] in the image\nAnd my action is Action: TYPE\nValue: Miami, FL"}]}, {"id": "mind2web_2784", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_769d15aa-5ed6-45e7-8caa-d271597da9d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.767, 0.192, 0.782] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2785", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8bcb7868-5c9f-444e-8759-3c089e797034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.481, 0.745, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2786", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_99cd8c52-e44f-48d2-a670-27822e4ff213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.188, 0.571, 0.21] in the image\nAnd my action is Action: TYPE\nValue: 1HGCM66543A064159"}]}, {"id": "mind2web_2787", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_24f44f71-9d71-4647-a587-f4f33b5b3fb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.613, 0.465, 0.694, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2788", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_31ea9a5f-c903-46bb-a7cb-6b04c5af555a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.112, 0.354, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2789", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_89339d95-1d28-4d8e-bd50-7014518d77f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.204, 0.754, 0.219] in the image\nAnd my action is Action: TYPE\nValue: MOMA"}]}, {"id": "mind2web_2790", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c909f54e-a1b0-4e7d-b89d-53df496da5ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.619, 0.307, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2791", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_241b0765-cc4d-4ce1-9b29-92cf7f2173c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.521, 0.045, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2792", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_5ebb131c-9681-479f-ae2a-2c8d50b7e606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.281, 0.285, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2793", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_f1d21f88-e302-42ae-8d0c-0144616650fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.193, 0.38, 0.24] in the image\nAnd my action is Action: TYPE\nValue: Miami Airport"}]}, {"id": "mind2web_2794", "image": {"bytes": "", "path": "./images/37564222-bb58-4a55-b47b-e9ffbbc1d160_c62fa753-fdf3-4a97-a464-d6e1a2d7c20f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the results of the most recent NFL games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.265, 0.078, 0.286, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2795", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_b47b0fbe-e5bf-4cb4-a560-93d4d86a1f35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.133, 0.568, 0.165] in the image\nAnd my action is Action: TYPE\nValue: Seattle"}]}, {"id": "mind2web_2796", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_d7618269-d378-4b6e-9124-9f8558d304d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.7, 0.353, 0.737] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_2797", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_24f70490-eff7-4c67-aaaa-e72120ee5528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.553, 0.184, 0.571] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_2798", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_2c7c5fe2-1b57-449c-83f0-eb79a096739f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.487, 0.137, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2799", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_ee36e538-a5c9-46f4-a19d-7fb9f3fc9b3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.568, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2800", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_95a5b4f9-e96e-4315-abec-cd2a380ae344.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.345, 0.263, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2801", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_61eea936-7790-4dc4-b778-5e3ceae09c14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.101, 0.196, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2802", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_362374fe-0388-41ab-bc3f-222224b2119b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.033, 0.42, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2803", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_e537a59c-12b2-4a02-b0eb-399d677e5b81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK\n[button] Go! -> CLICK\n[link] Planea Tu Visita \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.118, 0.967, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2804", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_ea6d65a2-979e-4fa0-816e-5c637b48c014.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Orange Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.105, 0.281, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2805", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_f88f7e83-95c2-41e3-a733-ec1997c2f55b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.351, 0.677, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2806", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_c4c1be99-57dc-46bc-bff2-b0687469cc42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI\n[option] Airport Chhatrapati Shivaji Maharaj International ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.184, 0.764, 0.194] in the image\nAnd my action is Action: TYPE\nValue: NEW DELHI"}]}, {"id": "mind2web_2807", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_1eb20c82-4d2d-4d62-8ddf-3f902ad6e301.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.348, 0.218, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2808", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_d13b77bd-d861-4db3-a2bd-5e9b93f3a743.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.76, 0.266, 0.8] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2809", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_454e8a2e-689a-4cc6-b987-459a0ad78207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.251, 0.157, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2810", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_69915593-b522-4215-bd37-8a27f3aa41b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.102, 0.881, 0.118] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_2811", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_93efc07c-9976-40b0-8eec-9bee64d4f349.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.379, 0.655, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2812", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_0511e7ae-896b-464b-b6ce-185a2db5c887.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] New York -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: 66 perry st\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.096, 0.718, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2813", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_7439f8b2-c8dc-495f-8dab-944cea4da660.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji\n[div] Fiji -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.085, 0.266, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2814", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_aa57cbad-a560-403a-a60e-dac248b9a9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.048, 0.566, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2815", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_acb70a60-4b7a-40b0-ae7d-80f59fd9d80a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.417, 0.274, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2816", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_249c9e0b-a8b9-48e5-a518-f5f037532ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[button] Back -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.668, 0.193, 0.704] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2817", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_0d28ebcd-d197-45b9-9d04-92004c51a57a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.372, 0.192, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2818", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_37ce8975-b564-4ed3-9ef4-93ef6e3d31cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> CLICK\n[spinbutton] Flight number , required. 
-> TYPE: 6944\n[combobox] Date -> SELECT: Thursday, April 6"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.301, 0.875, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2819", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_4e0cd350-2884-4841-b365-0b0d62b7a9ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.584, 0.325, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2820", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_530c1c2b-4253-4258-be3d-ace6cee9102e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.29, 0.29, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2821", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5af256e5-4b7d-429d-a1d6-e4c6fffd8129.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK\n[textbox] Drop-off location -> TYPE: Miami\n[span] Miami, Florida, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.101, 0.963, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2822", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_bacc7f66-2ed3-4753-92c6-b517e447321b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] New -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[img] Woods 41366 Surge Protector with Overload Safety F... -> CLICK\n[span] Qty: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.314, 0.857, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2823", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_32136539-d563-4515-a062-e74052a89105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.166, 0.447, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2824", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_47be89ea-69a2-4c87-a4c6-2068241fee24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.068, 0.181, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2825", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f2cbeb2f-72b6-4862-8ea3-3b40e6926317.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. 
My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.727, 0.092, 0.738] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2826", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_88ae7475-92a6-4415-bbe3-16b73b100272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER\n[link] On Sale Now -> CLICK\n[img] movie poster for Elvis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.189, 0.177, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2827", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_fa588085-8f33-47b1-8ec2-145c85ae252f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.573, 0.047, 0.591] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2828", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_e6e07a93-605d-4da0-aafd-e8e6f39a344c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.127, 0.666, 0.146] in the image\nAnd my action is Action: TYPE\nValue: NEW DELHI"}]}, {"id": "mind2web_2829", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_8b345767-07e0-4c1c-b939-9cdc2d8bd275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.813, 0.3, 0.822] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2830", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_dec5bc64-6ab4-47dd-bdd7-3d9b3c321864.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.432, 0.231, 0.453] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_2831", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_2786788a-6a55-495b-bcb7-19ceadaa2632.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[span] Westminster -> CLICK\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.154, 0.529, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2832", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_79d473dd-a77c-4f65-afc8-e214c8355550.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen\n[spinbutton] Main item quantity -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.407, 0.96, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2833", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_e8ebac86-489a-45a0-83e3-9963de2cf23a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK\n[link] Bridges & Tunnels -> CLICK\n[div] Tolls by vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.325, 0.367, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2834", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_3466e787-56c0-4c59-96f4-a31bf152d42e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK\n[span] Traveler Rating -> CLICK\n[img] 4.5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.165, 0.159, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2835", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_2b7cd081-ffb7-4ead-9b48-8c8a72c92b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK\n[button] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.177, 0.855, 0.212] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_2836", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ee0f6005-30c0-42c8-a5be-131d002b1322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.03, 0.159, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2837", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_e70021d7-5c43-4e5b-a710-953fccc9b3f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.245, 0.699, 0.275] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_2838", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_7d65206e-629f-44a2-9720-1d58f8889d97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.266, 0.249, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2839", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_a5503712-4dd3-4c5a-ae2f-89359854adbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.828, 0.082, 0.842] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2840", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f0a252b2-33f2-496a-8540-e943e77082bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8\n[select] 00 -> SELECT: 37"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.797, 0.309, 0.854, 0.339] in the image\nAnd my action is Action: SELECT\nValue: AM"}]}, {"id": "mind2web_2841", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f70b6c21-ea35-4256-824d-f478df3bc254.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.177, 0.393, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2842", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_cc6fc299-2804-4e5d-88b7-816f63bf8bd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.174, 0.418, 0.209] in the image\nAnd my action is Action: TYPE\nValue: Hackney"}]}, {"id": "mind2web_2843", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_29b304e8-8ac3-4dca-a084-e2a2b157d560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.006, 0.467, 0.041] in the image\nAnd my action is Action: TYPE\nValue: gorillaz"}]}, {"id": "mind2web_2844", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_b644b13e-0e08-4e91-8dbb-e80427e1b76f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.194, 0.181, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2845", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_265e54aa-6c90-4afd-832f-8a1f4fe6294b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.277, 0.328, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2846", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_6379c507-cb3e-4e70-bd40-2c45ea705298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.347, 0.354, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2847", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_c011c7eb-4c26-49b9-8331-9f1e96f2331c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)\n[select] All -> SELECT: Digital"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.239, 0.196, 0.26] in the image\nAnd my action is Action: SELECT\nValue: French (299)"}]}, {"id": "mind2web_2848", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_083450dd-1a24-4b01-a29b-f370c094324d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.422, 0.155, 0.491, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2849", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_88e67c68-b1a0-4509-b2f8-bb568aa3142b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Supported Devices -> CLICK\n[generic] FREE LIVE TV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.388, 0.273, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2850", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_f0e82b12-d16c-4d45-b667-0ceba837fc70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.005, 0.491, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2851", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_3e758aec-19d7-4865-aad8-cf4d53774bf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.321, 0.37, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2852", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_18977e76-04cc-4a66-a066-08c24cd53b5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.328, 0.284, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2853", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_1fbb7015-2342-4083-9fc8-141bfe2c3d68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.203, 0.347, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2854", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_fe6b056b-6b75-4102-91d4-acf37296d4ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.459, 0.29, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2855", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b285e7b0-2a4e-43b4-a6d0-4ac251fcc085.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.274, 0.234, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2856", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_52f08417-3d87-4854-b93b-6c9e07559ab6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[generic] 4 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.166, 0.863, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2857", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b0065ed3-dd9d-4845-8174-7be5aed5406d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.245, 0.359, 0.269] in the image\nAnd my action is Action: TYPE\nValue: 74th street, brooklyn"}]}, {"id": "mind2web_2858", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_4edcf87f-d3a2-4bcc-b55d-01d9bb2cef31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] Railcards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.226, 0.311, 0.244] in the image\nAnd my action is Action: SELECT\nValue: 18-25"}]}, {"id": "mind2web_2859", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_683c0864-23e0-4989-979a-16e9b0ba204f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[textbox] Date use format: 17-Mar-23 -> CLICK\n[path] -> CLICK\n[link] 18 -> CLICK\n[listbox] hour -> SELECT: 15\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.362, 0.16, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2860", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_0be78dd3-a700-4d87-92b5-bc57c37a4384.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\n[link] Health -> CLICK\n[heading] Most popular events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.255, 0.153, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2861", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_3f7ed9af-1f23-4b1c-aa63-1b897f1f8742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.217, 0.56, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2862", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_78338dfb-bd62-4024-9a68-17480d94f80b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.041, 0.966, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2863", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6097d607-4458-40c6-9005-9e21bf70ecaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.324, 0.781, 0.338] in the image\nAnd my action is Action: SELECT\nValue: Price"}]}, {"id": "mind2web_2864", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_2e3725c5-8ae4-4af4-b4da-d19fa51f89d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.41, 0.339, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2865", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ca937f48-03be-48be-9daa-ffe7587749c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.137, 0.362, 0.172] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_2866", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_981cfbcc-0b50-4f18-80a7-35a4cf18e9d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.125, 0.963, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2867", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_4da9977b-a124-4efe-9395-6120ab50f4d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.277, 0.413, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2868", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_37123deb-b58b-48af-806d-b33471d5e546.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK\n[link] See Details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.122, 0.443, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2869", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_55282477-0c69-47cb-aab3-15caa2215b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.111, 0.446, 0.123] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2870", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_f94749d5-d311-4026-8036-c81f05ec38e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.229, 0.326, 0.268] in the image\nAnd my action is Action: TYPE\nValue: Hackney Clothes Swap - Earth Day"}]}, {"id": "mind2web_2871", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_1f12c03d-ee6f-4717-bdfc-66c289973d4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[listitem] BMW (389) BMW (389) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] X5 (87) X5 (87) -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.087, 1.002, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2872", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_0134b2a2-0960-4c1c-b128-61aeb08dd0c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.536, 0.287, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2873", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_41e84ed6-7a28-41f6-92e0-daa1fc5f5611.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM\n[combobox] Select Residency -> SELECT: Vietnam\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.22, 0.782, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2874", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_67e32043-4f15-4318-a51f-237dfcf55ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.152, 0.432, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2875", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_dd2e43c1-312e-420a-b90a-c274075490db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.529, 0.111, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2876", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_6f9b8dbd-0ec0-49af-8d7e-9e8596170ef8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.173, 0.84, 0.195] in the image\nAnd my action is Action: TYPE\nValue: New York JFK"}]}, {"id": "mind2web_2877", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_63848707-7e00-4f6b-9033-a086a7c6bdaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.403, 0.243, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2878", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_c37c733c-abb7-4a0a-a1cd-c3d90df774a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.442, 0.15, 0.471, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2879", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_0d111192-54d9-412a-84ff-e2603690250a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.233, 0.182, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2880", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_18c3278f-f64b-424e-87a4-39072ea492f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.222, 0.657, 0.263] in the image\nAnd my action is Action: TYPE\nValue: laguardia airport"}]}, {"id": "mind2web_2881", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_cfa92282-3f8c-4a8b-a7c5-4cb5ad14ef19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.463, 0.263, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2882", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_d8e1e74e-2d72-49ef-9c27-8e81179156c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.28, 0.354, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2883", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad3a284c-17af-4570-b6d4-fd177a683a78.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.194, 0.709, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2884", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_0e0ccf1d-7ddd-456d-a89e-469d3a00a188.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.837, 0.945, 0.874] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2885", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_6b3f5ae5-e781-4e07-beea-f548df42dfe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.218, 0.782, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2886", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_4b695869-979b-4fc6-bcef-b75508d9d353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.274, 0.923, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2887", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_49915c43-ef6b-4ab0-9559-24be43b60267.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)\n[select] All -> SELECT: Hardback (13,067)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.232, 0.196, 0.248] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_2888", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_50059fe0-a21c-4c62-a8ea-ce6abbb1679a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK\n[textbox] Destination -> TYPE: EWR\n[div] Newark Liberty Intl (Newark) - -> CLICK\n[img] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.427, 0.204, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2889", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_c4137ce6-fa4d-4c99-9a90-5f8465c290c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK\n[textbox] Enter First name, last name, and/or username: -> TYPE: Joe Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.617, 0.13, 0.645, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2890", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_9ad6f25b-3247-4c7d-843e-ba9936959a88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.027, 0.67, 0.05] in the image\nAnd my action is Action: TYPE\nValue: mirror"}]}, {"id": "mind2web_2891", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_a3d44210-e42f-4f1a-99c7-6c695782189e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK\n[button] $56,000 + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.254, 0.24, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2892", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_eb9e2f08-e45e-4152-b15c-68af8e163e11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.243, 0.732, 0.286] in the image\nAnd my action is Action: TYPE\nValue: Nemo Front Porch 2P Tent"}]}, {"id": "mind2web_2893", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_38402c1f-8d43-4fce-97b2-4dde762c43cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK\n[button] View upcoming departures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.305, 0.497, 0.342] in the image\nAnd my action is Action: SELECT\nValue: SOUTHBOUND Forest Hills"}]}, {"id": "mind2web_2894", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_5c6956c9-6868-4100-809d-2d60c8266d39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\n[link] BUY TICKETS -> CLICK\n[span] -> CLICK\n[label] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.927, 0.232, 0.982, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2895", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_711afc2a-5dda-4d63-9704-e148390bbd8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.223, 0.795, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2896", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_16158a8f-e6d0-46ee-b592-bb982f0ea0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.224, 0.331, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2897", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_aa1e041e-4dec-4d00-971d-f27cbae2c3bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.115, 0.75, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2898", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e6e3ca49-d6be-447f-9169-b729bc647ee1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.129, 0.828, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2899", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_d2628a1f-38e2-45f0-b1cc-07292b3b737b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.155, 0.276, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2900", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_b6343c7a-6d35-4068-a022-3d52bdfb2d80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.059, 0.264, 0.068] in the image\nAnd my action is Action: TYPE\nValue: cough medicine"}]}, {"id": "mind2web_2901", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_e11ffb02-f80c-4113-90d9-a7a3fc334da9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2902", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_b1b6ed4f-e03d-4b9f-8c20-2487956712bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.484, 0.253, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2903", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_4f3485c5-4000-44a9-b95c-82f5f488f49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.568, 0.179, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2904", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_2442d176-ba01-4693-9f18-ee18aeb6baba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.094, 0.265, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2905", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_ffe315e9-ad0e-4366-ba6c-cbbe02d20908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK\n[textbox] CHECK IN -> CLICK\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.18, 0.797, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2906", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_51b45d5f-8bb8-4178-8db0-ea0a9c2a2138.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.817, 0.257, 0.943, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2907", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_60755e3c-381c-4066-bf09-930cb0c80bd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.352, 0.285, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2908", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_3f3a9593-8678-4c23-9d1b-d5bfa52c98c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.691, 0.52, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2909", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_bbfb3d84-6cda-4a67-ae68-04e8649f8c38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.269, 0.927, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2910", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_b258f35f-a2e4-4edc-8102-f3109e0b4909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.239, 0.688, 0.259] in the image\nAnd my action is Action: TYPE\nValue: New Yok"}]}, {"id": "mind2web_2911", "image": {"bytes": "", "path": "./images/a6080a77-ec5d-44d6-a51e-0b4ca0d50879_ff20befa-13eb-49ef-9601-c1423f6d06d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of critic reviews for the movie Creed III.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.229, 0.156, 0.244] in the image\nAnd my action is Action: TYPE\nValue: creed III"}]}, {"id": "mind2web_2912", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_28d18847-a922-481b-983f-a0131d55e6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.505, 0.194, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2913", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_4c7b210f-a952-4105-a305-666b67c4413d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2914", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_a2c937ff-c9d4-49ec-8481-f6c7292f0a27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK\n[i] -> CLICK\n[button] Apply Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.474, 0.977, 0.501] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2915", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a65bd23a-d4e7-404d-b5f9-a18afdcd9516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.273, 0.181, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2916", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_657a05b7-3405-4ec1-bc74-6e2dcacc2244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.212, 0.891, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2917", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_da3756d6-bdec-418d-bd70-c9b28d7ae532.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)\n[select] All -> SELECT: Digital\n[select] All -> SELECT: French (299)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.195, 0.196, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2918", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_e686e478-89e5-4245-8f6b-9066b3cfcd46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.202, 0.803, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2919", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_d0942028-ea6c-4aa1-b417-3f768a2c6013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.395, 0.47, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2920", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_8de97dd0-06a1-43b2-9db1-9a50efe628b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.191, 0.552, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2921", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_54d26b8d-20c4-482b-99b5-1e444c403105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK\n[span] 5 -> CLICK\n[span] 15 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.108, 0.914, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2922", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_b41d47a0-2af6-4068-877c-5fecb6b8b45e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.191, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2923", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_669be30c-bc87-4806-9cc7-18139eb1e8bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK\n[button] Discount -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.199, 0.988, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2924", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_525afe7d-fb68-4af8-83b9-ae729b67d9e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.194, 0.641, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2925", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_de150a34-2838-4669-80db-ac4bf235c452.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.497, 0.708, 0.531] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2926", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad1a399e-b8b0-4a93-bd03-f6a9c930c30e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.437, 0.249, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2927", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_c3012402-7d2b-49f8-8a3f-4a6ff36ca6a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.172, 0.886, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2928", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_4fa33a9b-c512-4911-a2c8-c8118c344b25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK\n[textbox] price to -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.645, 0.192, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2929", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_522a5110-9344-48c9-a348-5cf143bdfd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.057, 0.828, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2930", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_c0154493-539d-46e0-a7d8-13d0ec9ea144.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[div] Calendar -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.516, 0.09, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2931", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_5307a9cc-3824-4f21-ba80-86eb1dcab306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.345, 0.486, 0.371] in the image\nAnd my action is Action: TYPE\nValue: BOSTON LEGAL"}]}, {"id": "mind2web_2932", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_42833388-627b-43f2-a72d-ab7582cef893.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Paris Charles de Gaulle Airport (CDG)\n[button] Paris Charles de Gaulle Airport (CDG) Paris, Franc... 
-> CLICK\n[generic] 13 -> CLICK\n[generic] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.549, 0.372, 0.574] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2933", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_3067c854-4afb-4093-9f28-ee9b0f735e2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.584, 0.209, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2934", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_2cd0f6aa-940d-4a88-a992-726a6eab0ea3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.187, 0.504, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2935", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_b119563f-04a3-4152-97ee-312e2601cea8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. Kennedy International Airport (JFK) -> CLICK\n[textbox] Parking Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.41, 0.118, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2936", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_ea2a29f1-6848-4ffe-a130-49eb73e01e99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.015, 0.981, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2937", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_4d9d8608-8e78-44ec-b091-592d1433d369.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK\n[link] Football -> CLICK\n[link] Men's UA Football All Over Print Metal Logo Short ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.421, 0.952, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2938", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_098ac9fe-e29c-4f6a-ab2c-78e793bf43d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.082, 0.628, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2939", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_1c629567-4fe9-4654-8aba-72a300154818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Pick-up -> CLICK\n[textbox] Pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.292, 0.931, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2940", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_f7f48747-38d7-43c2-9499-034d6ab8590f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: Under US$20\n[select] All -> SELECT: Hardback"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.397, 0.196, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2941", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_31182b46-b2bc-4c21-9b91-ef93eaff57aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.162, 0.103, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2942", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_94539249-ecc8-4133-9890-519b2ea618e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.225, 0.5, 0.26] in the image\nAnd my action is Action: TYPE\nValue: AUCKLAND"}]}, {"id": "mind2web_2943", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_f3cedb20-a49f-49a2-922a-8cfd2a350ec1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Movies, Music & Games -> CLICK\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.167, 0.099, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2944", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_a08689bf-a507-4c9c-b25a-dd1d2d5adc1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.579, 0.223, 0.607, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2945", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_ae9b2eda-9e42-4a8f-a07e-abaad1212cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_2946", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_1014509f-e902-4d92-9c7f-1668d0bf2f45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.019, 0.335, 0.03] in the image\nAnd my action is Action: TYPE\nValue: cafe"}]}, {"id": "mind2web_2947", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_a5c7978a-226d-4d6f-919d-a987f495eab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.377, 0.421, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2948", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_6efa03dd-c712-4bce-80cd-c1ac3251e298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.01, 0.613, 0.035] in the image\nAnd my action is Action: TYPE\nValue: The Magician's Elephant"}]}, {"id": "mind2web_2949", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9cb46440-5f2a-4fd8-8725-bb7a17846a1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK\n[combobox] Drop off time Selected 10:00 a.m. -> CLICK\n[option] 12:00 p.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.812, 0.484, 0.844] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2950", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_56a6a786-d692-4dca-969f-3d04f183ff2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] Xerox Toner -> CLICK\n[span] Newegg -> CLICK\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.349, 0.158, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2951", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_38cab605-7db6-4ce2-b910-1a9793ec2332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] $50 to $100 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.086, 0.968, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2952", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_60eff40d-6f5d-42ae-ab89-1cb9059a3eeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2953", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_43918fa3-9bec-465d-bd86-e9fe67bdd317.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK\n[link] Showtimes -> CLICK\n[heading] John Wick: Chapter 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.227, 0.134, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2954", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_139899ed-e29d-42d0-b65d-dd0f16f15868.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK\n[button] Team -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.556, 0.307, 0.568] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2955", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_ba81a68d-2108-4b3e-a009-e9f9c0992380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.009, 0.35, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2956", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_db4f9635-d23d-4d28-bebc-f80e58667212.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.371, 0.736, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2957", "image": {"bytes": "", "path": "./images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_bf7273cc-5470-4d70-9726-f5baa1e05def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the design tool for a new home office.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.069, 0.501, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2958", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4d7706e7-35fe-4adf-a968-ba5f21d0fe38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK\n[button] 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.417, 0.238, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2959", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_7842187c-40c4-40d1-9735-376204241576.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.063, 0.737, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2960", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_0f18ac4c-1a06-4005-a45d-4b9e1b1096c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.299, 0.311, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2961", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_8961a973-3d00-4e41-a0b8-c24b544aa233.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.199, 0.359, 0.221] in the image\nAnd my action is Action: TYPE\nValue: Greenport"}]}, {"id": "mind2web_2962", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_be028dfc-bafa-4ce8-9b29-da311352ba93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.213, 0.233, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2963", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_5d397754-60c0-4eec-bc5e-b1f68748dddf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.139, 0.38, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2964", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_9b095d3d-faae-4676-bb3d-46ffc3ae5f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.197, 0.743, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2965", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_b87559eb-fbcd-470b-bf56-c63609269ba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.219, 0.517, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2966", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_89812b96-d097-40d2-9b31-672894992c81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Lebron James\n[div] LeBron James -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.07, 0.574, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2967", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_099c45fc-ac66-40c7-92f9-016c98c58a85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.087, 0.35, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2968", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_fd3c4d6b-de6c-49dc-a1ce-b8c22bbd015f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.185, 0.736, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2969", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_ceca548f-aa24-4a6b-8249-a0974e25b9d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.163, 0.405, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2970", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_fa09573f-3fe8-4781-878c-595e27b1289d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.322, 0.207, 0.343] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_2971", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_ca1c9ff1-ad81-4a7a-b51f-0d2958396277.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.015, 0.211, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2972", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_26c2202a-1d0c-4925-b6cc-87b75a0d5d09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK\n[link] Your lists -> CLICK\n[link] Create a list -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 3.637, 0.509, 3.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2973", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_9e2ac8b4-b3a9-4882-983f-24af5568549d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.345, 0.253, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2974", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_1e8b8975-5a8e-4555-aa79-7b38c3a2f62e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.643, 0.095, 0.65] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2975", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_6cd59e9c-d4c6-408e-bcc8-74cec11ee801.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.334, 0.349, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2976", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_733acd7f-9db8-407d-9169-c28918f38ce6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[button] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.319, 0.642, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2977", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_d9a57dc9-b37d-47f8-801f-36523ba7235a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.197, 0.238, 0.23] in the image\nAnd my action is Action: TYPE\nValue: Cheyenne"}]}, {"id": "mind2web_2978", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_9a2bfce6-7f6c-496d-8537-c063af49d516.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.178, 0.312, 0.192] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_2979", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_6dde45f6-ca37-4848-9b11-2c361a0e023c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.216, 0.359, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2980", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_da2aa56b-9261-4456-8a0e-d4d5b0087429.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.259, 0.971, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2981", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_b2ff3565-c9c1-4ad1-8be6-a68c94ff24e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.01, 0.323, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2982", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_cb1b0ad6-b6ce-4345-bb63-f83f179d8bba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to national parks -> CLICK\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.161, 0.325, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2983", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_129ca29d-80b0-4d60-ba91-0e80e47f9911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... 
-> TYPE: 60173"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.15, 0.972, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2984", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6bdf3560-322b-4a2d-800a-74c3e8f62dc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.095, 0.258, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2985", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_02cd0163-e5d7-4bad-92c2-dfe415380130.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK\n[button] APPLY -> CLICK\n[link] ADD TO RFP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.811, 0.96, 0.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2986", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_17b6a926-894b-4c39-82a4-70ce263fd6db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.203, 0.048, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2987", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_6fcb9d43-418b-4352-8aa2-ac7e22d8e10a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.46, 0.691, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2988", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_241fbd7e-3408-47f1-ba87-e873528e7048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] 10+ Night Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.347, 0.306, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2989", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_de548550-7457-48b1-8215-d63d7fe2643e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.15, 0.617, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2990", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_e8435fdf-1ad0-4c53-936e-9416382b58e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.172, 0.216, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2991", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_955f7e1b-c9a4-4f90-999b-d133dae42588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\n[link] Size Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.241, 0.665, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2992", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_99e1fdce-02c7-4ac8-8777-6a8a73444332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK\n[button] Join Waitlist -> CLICK\n[textbox] First Name * (required) -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.367, 0.311, 0.573, 0.348] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_2993", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_e247cc44-be69-402b-8df7-1b64365510fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.432, 0.498, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2994", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_9b9714a3-1307-4aed-8fe7-c4aa796cf448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.236, 0.214, 0.265] in the image\nAnd my action is Action: TYPE\nValue: 60538"}]}, {"id": "mind2web_2995", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_741f7aa0-288a-42b4-95c0-1dbe3f4025e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers\n[div] The Elephant Whisperers -> CLICK\n[div] Add to Watchlist -> CLICK\n[textbox] Search IMDb -> TYPE: The Magician's Elephant\n[div] The Magician's Elephant -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.435, 0.942, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2996", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_cb171a5f-f105-4818-bc45-b9a05368abf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK\n[link] Shop red -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.344, 0.459, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2997", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_fa576ce8-1b4b-433d-8ac0-b881f304710c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.002, 0.031, 0.012] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2998", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_38e3e23c-932b-406a-bd3f-34958395c5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.329, 0.166, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_2999", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_6aeb2f30-3d17-4ef6-8073-6ace2fdbc4b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK\n[button] OK, got it -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.179, 0.925, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3000", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4fb89993-2c2d-43b4-8021-5cf94957b393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.007, 0.988, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3001", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_5bbdade7-b345-4703-964f-99ff3ae7385c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.536, 0.336, 0.548] in the image\nAnd my action is Action: TYPE\nValue: 123st rd"}]}, {"id": "mind2web_3002", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_c670a2c4-9acb-4532-a8e0-bcd618e1f8f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.376, 0.233, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3003", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_72a3df07-b748-4dce-9fcd-8047ccba0f04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.136, 0.573, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3004", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_f6cab64e-9db9-4928-b663-52d3bd4561da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: BRISTOL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.161, 0.326, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3005", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d233722a-833e-4708-baa1-b6e6ed139325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.226, 0.939, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3006", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_9f55a450-95dd-424f-950a-6e250aadc6a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[heading] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.325, 0.63, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3007", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_369d575b-1f79-4eca-87a2-b9478ab681be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.612, 0.326, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3008", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_159c7b17-5f58-4a88-bc18-07362dc1987e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK\n[span] From $62 -> CLICK\n[button] Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.32, 0.198, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3009", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_cd309bbc-3a76-4037-a334-4a8af50af9fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.418, 0.894, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3010", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b583686e-09eb-48e7-9bcb-65faa05d92cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.734, 0.49, 0.908, 0.518] in the image\nAnd my action is Action: TYPE\nValue: 888888888"}]}, {"id": "mind2web_3011", "image": {"bytes": "", "path": "./images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_18af82b7-9edc-400c-be67-a8172c96e423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of HomePod mini\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.0, 0.737, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3012", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_b0bd4807-1c83-4f24-a9ca-e6b59dd2d8b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\n[button] Camp & Hike -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.36, 0.543, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3013", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_db770456-a5f4-40c0-9b55-2e3e0857f4bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK\n[textbox] Confirmation or eTicket More information about con... -> TYPE: 12345678\n[textbox] First name -> TYPE: Jason"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.244, 0.726, 0.269] in the image\nAnd my action is Action: TYPE\nValue: Two"}]}, {"id": "mind2web_3014", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_dcc79ac5-57a9-4ec7-8035-f7bc14000e30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[heading] Southeast Region -> CLICK\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK\n[textbox] Departure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.452, 0.777, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3015", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f861f270-4006-47c8-abb8-b7c3ec0ee2c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.152, 0.379, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3016", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_d201ad20-3ae5-4d4f-95ee-54a12ba937e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.288, 0.304, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3017", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_2167a763-7333-43ce-8b28-5dd161d43cf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK\n[textbox] City, State or Zip -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.905, 0.283, 0.938, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3018", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_4a747255-e268-4174-9aae-ef927747e463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.013, 0.262, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3019", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_ebfd5d22-29c0-4188-8b78-a1901e05974b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK\n[textbox] search -> TYPE: Dota 2\n[link] Dota 2 Free -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.485, 0.294, 0.611, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3020", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_67e9ecfe-bf95-42a4-aabf-a684323a69c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.242, 0.393, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3021", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_f84c6ce3-3f65-4091-a2ba-e372b65fbaa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.078, 0.902, 0.102] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_3022", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_9ae69032-e90d-4dd4-a331-dc5968c0c211.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK\n[heading] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.145, 0.388, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3023", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_37a05e4f-282b-4550-b32b-59a3ae182626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.175, 0.943, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3024", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2f895a7d-c8c2-474e-959d-2cc70df86dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.016, 0.335, 0.026] in the image\nAnd my action is Action: TYPE\nValue: barbershop"}]}, {"id": "mind2web_3025", "image": {"bytes": "", "path": "./images/73cf6eec-cae6-4d5b-9b8e-e44359311565_7806581b-01a1-4c64-80e8-249ca26e8226.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for the newsletter\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.551, 0.337, 0.598] in the image\nAnd my action is Action: TYPE\nValue: larryknox@gmail.com"}]}, {"id": "mind2web_3026", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_d0ff114c-d1e0-4002-88e7-a44d33b20e16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.017, 0.564, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3027", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8d01874f-6ff5-460f-85ca-ec27f7a38461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.455, 0.619, 0.493] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_3028", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_958c6197-e149-4213-bf7e-760f1d4708f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.579, 0.284, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3029", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_972c9c30-8c70-4bdc-b484-3f38d969ee99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.302, 0.958, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3030", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_ff95f90f-0dfe-4ec7-a33f-3f7fa040acc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.046, 1.051, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3031", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_1b545264-a8c9-48ef-a6c6-873b960fa27f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.069, 0.339, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3032", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_75d262b6-ddd5-48f9-966f-4438087ee50e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 2.677, 0.415, 2.934] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3033", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_f80825e0-a464-4208-892e-4982389fabd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.382, 0.344, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3034", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_77889bbd-2782-42c1-9514-0f31846074cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.612, 0.922, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3035", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_f6deabe8-871c-4244-a62c-a369378c0352.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: Phoenix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.372, 0.582, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3036", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_d12d7503-85c9-4e58-a998-eb5cb3fd47a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.331, 0.027, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3037", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_af73a962-fb6a-4393-b7fb-2607ab8a26ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.095, 0.629, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3038", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_fa243564-2db4-4637-be03-7c5855112c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> CLICK\n[textbox] max price $ -> TYPE: 99"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.64, 0.557, 0.786, 0.599] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3039", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_a36e1bc5-c3fe-4821-b962-0b360dab1f1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.728, 0.158, 0.736] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3040", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_4b1fb823-3bba-4dbe-b7fb-b6aa69585739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.212, 0.712, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3041", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_028f293a-f011-4c5a-a8d7-75c3024c70c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.103, 0.402, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3042", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_2b413c09-655e-41ba-8f2b-fd66aba87bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.279, 1.0, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3043", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_595b3f33-53fd-426c-95d1-2049a525a4cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK\n[div] Bayern Munich -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.858, 0.05, 0.889, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3044", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_3893c47f-d0f7-4f68-8989-f92b5d4b553a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.535, 0.277, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3045", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_95435f7e-87e9-47de-ba6b-3818d8a47081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.036, 0.673, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3046", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_ca0c6a4f-e759-4971-bbff-02f2bee950be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverly to Glagsgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.099, 0.327, 0.123] in the image\nAnd my action is Action: TYPE\nValue: Glasgow"}]}, {"id": "mind2web_3047", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_c1fdc477-879c-42ad-b10a-cb7edb58d429.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.684, 0.906, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3048", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_c59dad46-c249-4ecd-9c02-3ffe955c5147.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.041, 0.917, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3049", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_fef21c26-cc60-438b-935d-d274235a5ce6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.106, 0.957, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3050", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_8d91ee68-49ba-4c63-a109-0a0728c06026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.241, 0.655, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3051", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_262f044d-75e5-4a9d-863c-bbc2e8206b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK\n[checkbox] Red Devil (2) -> CLICK\n[label] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.171, 0.791, 0.192] in the image\nAnd my action is Action: SELECT\nValue: Wish List"}]}, {"id": "mind2web_3052", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_193f402d-c499-4804-84e4-a47a487844d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.274, 0.525, 0.307] in the image\nAnd my action is Action: TYPE\nValue: George Clooney"}]}, {"id": "mind2web_3053", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_47640ee0-514b-4bc9-85b9-e6000e0cfc0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK\n[gridcell] April 10, 2023 -> CLICK\n[button] Find tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.737, 0.685, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3054", "image": {"bytes": "", "path": "./images/13cf0b14-422b-4486-841a-aa9ded048380_3829d5f0-4044-4132-aba4-64fad198fbcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find kinect camera for xbox one.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: kinect camera"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.094, 0.228, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3055", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_9a136118-0c6f-44c8-b64f-5dbbc3e3deaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.369, 0.645, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3056", "image": {"bytes": "", "path": "./images/ca80bb42-2617-4f29-bbcd-bcc426b3e407_4b023b85-2772-4077-afb4-13b4e39518dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Airport information of Camarillo Airport, CA and check weather.\nPrevious actions:\n[combobox] Search Method -> SELECT: Airport\n[textbox] Airport City (e.g. New York) -> TYPE: Camarillo\n[strong] Camarillo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.593, 0.023, 0.605, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3057", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_6e4b5086-ab7f-4c94-8467-faf6a06f1082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.181, 0.478, 0.226] in the image\nAnd my action is Action: TYPE\nValue: Santa Fe"}]}, {"id": "mind2web_3058", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_13ee71ed-2890-462f-aab6-cefc95bf6e81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.517, 0.038, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3059", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_c58b2bc7-5044-42d8-8804-2536761d5dd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.6, 0.087, 0.621] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3060", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_5e4a5ce7-a657-4c55-b4fe-52ef17b2c466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.251, 0.196, 0.267] in the image\nAnd my action is Action: SELECT\nValue: Spanish (42)"}]}, {"id": "mind2web_3061", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_ba3a8dc1-ddd6-4c96-9215-fa4470fc1329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] INVENTORY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.214, 0.804, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3062", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_0c62b3b9-463e-44c1-a5db-b91ffde052e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.261, 0.367, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3063", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_f59cc735-1769-4401-9349-c127435edfb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK\n[searchbox] Enter ZIP or State -> CLICK\n[searchbox] Enter ZIP or State -> TYPE: 07055\n[button] Search for CarMax stores. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.488, 0.709, 0.53] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3064", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_df0101a1-cdcb-4001-a99d-5fe01a9d5f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.304, 0.484, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3065", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_477c560d-170c-41df-8298-b3a5df097ed9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.44, 0.056, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3066", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_0925b90f-0055-40f8-a347-3771f43852dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\n[combobox] Search query -> TYPE: lebron james\n[img] LeBron James -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.304, 0.186, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3067", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_610dceb2-f1d8-49d8-ac16-046af44796d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.168, 0.384, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3068", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_f267bc79-4189-4353-af8c-74f490c0c6fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.861, 0.128, 0.98, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3069", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4659e9b2-1197-45eb-b644-7c9166476d4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 9, 2023 -> CLICK\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.367, 0.336, 0.404] in the image\nAnd my action is Action: TYPE\nValue: 7"}]}, {"id": "mind2web_3070", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_e32bf4f6-c213-4e3b-90a4-759546efe869.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK\n[link] Filter -> CLICK\n[checkbox] Shop Pre-Orders Shop Pre-Orders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.022, 0.378, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3071", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_537cac8a-10bd-4de9-8487-99bf3041bd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.218, 0.291, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3072", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_b91fc8ad-9716-4df7-89ed-d728a87b758a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK\n[button] SET AS MY STORE -> CLICK\n[link] SHOP LOCAL CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.093, 0.288, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3073", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_fe3b630c-50fb-4bd4-8414-5a22fcbf3de8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.157, 0.573, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3074", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_c1213d35-a9c0-44e6-a81b-c3f04bb4ef40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.133, 0.181, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3075", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_3966eb03-525b-43a2-adc3-77b700f1eff0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK\n[heading] ROSTER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.257, 0.587, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3076", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_4f8fbba5-548b-4037-bcbe-a63232bbf964.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.343, 0.375, 0.355] in the image\nAnd my action is Action: SELECT\nValue: New"}]}, {"id": "mind2web_3077", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_71f6ece9-2a59-4408-8da1-d01c4e8a36a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.027, 0.456, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3078", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_ad14875a-2bee-4b4c-b9a9-5229a9213f46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.07, 0.42, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3079", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_bed3001d-01d6-431c-bff7-bcf8ff8ea839.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.135, 0.961, 0.183] in the image\nAnd my action is Action: TYPE\nValue: 60173"}]}, {"id": "mind2web_3080", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_93292f85-ace3-4d5f-9c66-58a5030b4526.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.403, 0.48, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3081", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_5f0bc65a-a65f-4c3a-a9e8-ec714ee4a01d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.083, 0.367, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3082", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_32e3bc8b-4bdb-4e41-b530-c6856fd481ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Activity -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.302, 0.154, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3083", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_9c2959ea-dc43-4168-b5af-91a91fccb5b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.172, 0.123, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3084", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_30bc3786-daf3-4ec9-a9ff-37b8f6c57ae0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.43, 0.379, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3085", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e95c855-bc55-4b1d-95e7-4b68a2b075dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.22, 0.769, 0.256] in the image\nAnd my action is Action: TYPE\nValue: Texas"}]}, {"id": "mind2web_3086", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_4379cbc1-7c06-473d-9df6-705f2b4e3321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.28, 0.249, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3087", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_8e0f41cf-b371-4a85-a613-09c17e485957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK\n[link] {{ 'see_more_label' | translate }} {{::list.info.n... -> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.171, 0.383, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3088", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_3b94029d-b4fc-45d2-8460-41fe1a2dae10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.296, 0.388, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3089", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_36d9bad0-4139-4bb7-9fba-972a1c25c356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.077, 0.211, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3090", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_10d25542-b5a1-41fc-83f2-470ec16e6b0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.161, 0.023, 0.434, 0.052] in the image\nAnd my action is Action: TYPE\nValue: indian"}]}, {"id": "mind2web_3091", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_414d6244-0798-42fd-8c5a-fba032091a90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.4, 0.916, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3092", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_be5d0683-3fab-48a5-9ce6-454a884f75b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[path] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.107, 0.83, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3093", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_37c75273-7565-4e18-9ed7-981b670517c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.241, 0.073, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3094", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_831dd65f-fe94-410c-959b-cbbbaaf170f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK\n[combobox] Search for a line -> TYPE: Oyster Bay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.121, 0.433, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3095", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_03ea971c-d3cb-44e0-92d7-0470361bc977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.165, 0.4, 0.182] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_3096", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_ff077583-f04d-41d5-b21a-8ce068740bec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.196, 0.359, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3097", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_3ba37681-2553-4a13-a574-56d3d82c6247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.162, 0.407, 0.181] in the image\nAnd my action is Action: SELECT\nValue: Jeep"}]}, {"id": "mind2web_3098", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_4fe53d58-b083-41ad-b7ec-0857093df247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.304, 0.079, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3099", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_0e9722ee-d2e7-4a8a-8a00-ef91a11a39da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK\n[Date] FROM -> TYPE: 04/01/2023\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.158, 0.645, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3100", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_3af71761-eb64-489c-a12c-fc741805c4a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.229, 0.514, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3101", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_5fcb6fb8-1500-462b-902b-e0e689a6d351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.569, 0.519, 0.7, 0.562] in the image\nAnd my action is Action: SELECT\nValue: Under 1"}]}, {"id": "mind2web_3102", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_9c019089-5faf-48e5-a693-58652ee8c53d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> TYPE: 12345678"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.353, 0.62, 0.393] in the image\nAnd my action is Action: SELECT\nValue: Friday, April 7"}]}, {"id": "mind2web_3103", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_6ac17b5f-32f5-4a08-91b0-708e270d6d61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.215, 0.359, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3104", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_77299dab-129b-4d1d-a419-48f5c2ba558a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.258, 0.263, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3105", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_fef1a976-a670-48f2-818a-82e23cd8c1f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK\n[button] Brand -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.008, 0.988, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3106", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_9e02ef91-c028-4e4d-a052-20a65eddc765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.206, 0.568, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3107", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_5a450f14-ffec-4efe-83fa-4383f087c099.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.056, 0.246, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3108", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_8e5642a1-2b18-401e-ae2d-1addfb70704d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK\n[button] Filter by rent -> CLICK\n[span] Hide Filters -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.882, 0.168, 0.969, 0.178] in the image\nAnd my action is Action: SELECT\nValue: Newest"}]}, {"id": "mind2web_3109", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_ed669e14-0f55-401d-80f4-9708fed8e93f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.752, 0.653, 0.778] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3110", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_21a24a97-8661-434c-849d-b37228d48abf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.117, 0.556, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3111", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_085c77a4-a501-4a57-a42b-1fbe40737f32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK\n[span] New York -> CLICK\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.791, 0.276, 0.834] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3112", "image": {"bytes": "", "path": "./images/673841c2-de8c-4417-bdcc-dc48753a539f_b741cfde-eb55-4b8e-964c-40e90b358bd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the current standings for the western conference in the NBA and find the top team.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.103, 0.335, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3113", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_79c52401-21b9-4853-b92c-a16509ed72e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.573, 0.233, 0.581] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3114", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_d62876cb-1030-4f2b-a107-57a4769fbe1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.196, 0.705, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3115", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b054d49c-5467-4b56-be24-b91fbf14da65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.322, 0.71, 0.358] in the image\nAnd my action is Action: SELECT\nValue: Camry"}]}, {"id": "mind2web_3116", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_f265ebef-4567-412c-affb-b29a66b3318a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... 
-> TYPE: 60173\n[span] Update ZIP code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.326, 0.969, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3117", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_e7011714-61ba-428e-903f-5c06b791549a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.309, 0.249, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3118", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_6d1655d7-b144-4284-a3f2-60ffcafeac40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.362, 0.956, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3119", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_689b707c-5a1c-4d4b-a8c4-78b279f9f47a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight from JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK\n[div] 13 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.194, 0.975, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3120", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_d129b23b-eec3-4afa-9787-6e2e06042e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.342, 0.261, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3121", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_70a5e455-dc3e-45b1-a233-1c4f6a4b7464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard.
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[button] Bathroom -> CLICK\n[link] Bathroom storage -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 60173\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.124, 0.971, 0.147] in the image\nAnd my action is Action: TYPE\nValue: 60173"}]}, {"id": "mind2web_3122", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_fc8e8688-fe07-461b-a576-85b64a501827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.104, 0.713, 0.115] in the image\nAnd my action is Action: TYPE\nValue: bournemouth"}]}, {"id": "mind2web_3123", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_d54537bb-6960-4393-a098-fb7b2390fd25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.824, 0.488, 0.862] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3124", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_24383821-fccc-4aad-9072-cc8ce10bd95b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.113, 0.205, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3125", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_87e4d380-4acc-465f-a3ee-6c5084405805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.174, 0.036, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3126", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_ebe6ec91-c8b0-4150-8180-728167110e5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK\n[link] East Village (9) -> CLICK\n[link] $16 To $30 (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.544, 0.603, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3127", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_d85cc5e5-16ef-4abe-b03d-fb6d6e7372e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[textbox] Registration Zip Code Where you will register the ... -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.215, 0.282, 0.236] in the image\nAnd my action is Action: SELECT\nValue: 200 miles"}]}, {"id": "mind2web_3128", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_1a611a4a-8c22-4c5c-ab4f-c061be863c91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK\n[button] Open More Dropdown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.079, 0.775, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3129", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_69dcb555-6f29-4d80-9783-dcd3f9ebdef4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April 2nd.\nPrevious actions:\n[link] sports. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.248, 0.294, 0.274] in the image\nAnd my action is Action: TYPE\nValue: New york knicks"}]}, {"id": "mind2web_3130", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_27a5a40d-ef36-4bbf-9d79-d6a1269d66e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard.
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK\n[gridcell] May 07, 2023 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.776, 0.037, 0.828] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3131", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_cd75134c-4538-42c7-9197-02dc7aaa3621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Search for CarMax stores. -> CLICK\n[button] set store -> CLICK\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.271, 0.243, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3132", "image": {"bytes": "", "path": "./images/ac35e5a5-f52f-4886-9a23-65f0a6492c5e_8cc079c8-9a71-477b-8bf6-9bbdc8ccf88b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find FC Barcelona's next fixture in the Spanish Copa de Rey\nPrevious actions:\n[li] Soccer -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.676, 0.1, 0.818, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3133", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_9d0945af-a93e-4af8-8aea-b8350b3741f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Pizza Crust Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.013, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3134", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_8d39083e-f62b-4599-bcc0-c857a5abf85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.095, 0.965, 0.114] in the image\nAnd my action is Action: SELECT\nValue: Low to High"}]}, {"id": "mind2web_3135", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9baa2836-7809-4f44-8023-dc4c5f602eef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.294, 0.315, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3136", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_b9e530f7-a586-48bf-8e6d-99f59bf306d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... 
-> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking\n[textbox] Monthly Start Date -> CLICK\n[gridcell] Sat Apr 22 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.308, 0.3, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3137", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_c2803987-3226-4c5f-b470-33c51dec0f99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 mileage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.292, 0.286, 0.331] in the image\nAnd my action is Action: SELECT\nValue: Good To Go"}]}, {"id": "mind2web_3138", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_c73c1b00-3a34-4287-8884-327b234c2dfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuisine in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.133, 0.705, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3139", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_1d11a5bf-821a-470e-af46-80630855a8f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take.
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.765, 0.789, 0.792] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3140", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_f62b9966-0056-47a0-b9f6-8c6da5a0210c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK\n[button] Date -> CLICK\n[button] 05/05/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.362, 0.837, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3141", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_0e54e343-24da-418b-bb3a-3695576b276d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258\n[input] -> TYPE: thomas.neo@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.154, 0.769, 0.193] in the image\nAnd my action is Action: TYPE\nValue: Anderson"}]}, {"id": "mind2web_3142", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_f7a84d47-0214-4ba2-90cb-e556c1ed2802.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.125, 0.709, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3143", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_c95b9c83-3c15-4619-af54-19f4b373ccdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago\n[option] Chicago, IL -> CLICK\n[button] See next Categories -> CLICK\n[img] Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.523, 0.158, 0.551] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3144", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_2f631344-e9f0-4a3a-87bc-273d5f604b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.563, 0.336, 0.569, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3145", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18914d6c-86d1-4e1b-9a42-0517303af913.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.14, 0.048, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3146", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_5f142677-efdb-410d-b3b9-917b3bd60b03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.04, 0.954, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3147", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_f4c92290-d674-4ed1-9ba4-d8a1d2be1464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.097, 0.925, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3148", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_3be67d5b-638a-4f22-bed6-294e7fbce6b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.556, 0.266, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3149", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_a50b95ca-ee96-44a7-bd4a-87fc4ceaaaf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.005, 0.887, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3150", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_69ca24e2-a91e-433b-9e32-73b3ec203f00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.102, 0.84, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3151", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_89aed3f2-a0c4-4b0d-85e2-04f93aaca067.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.187, 0.375, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3152", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_afd9ae37-5686-4a76-8d2a-b5a040a49170.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[button] 23 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.217, 0.156, 0.229] in the image\nAnd my action is Action: TYPE\nValue: 150"}]}, {"id": "mind2web_3153", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6f5a9bef-bc1f-4a26-8c39-1211813d1a79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.558, 0.116, 0.645, 0.146] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_3154", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_f562f61b-3bae-4e8a-b712-fdd0b009e09d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\n[tab] \ue90aFlight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.363, 0.28, 0.405] in the image\nAnd my action is Action: TYPE\nValue: Abidjan"}]}, {"id": "mind2web_3155", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_67c94a3f-d29d-4504-8e0e-4008a0d59813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.572, 0.988, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3156", "image": {"bytes": "", "path": "./images/69065697-a426-4e4c-80f7-82bf592b268c_ae5a5edd-5bd7-49a5-8be6-ed830b009bfb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find details on converting points to miles.\nPrevious actions:\n[link] Redeem Points \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.373, 0.796, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3157", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_be2f81b7-9bba-4448-bfed-6a56c9582521.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing\n[option] vintage clothing -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.2, 0.905, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3158", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_fa153d9e-d788-4f23-b0b6-468c38cc3a47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.009, 0.211, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3159", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_f23a29a4-d885-4c86-bbb7-6eee5e6b991f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.194, 0.832, 0.213] in the image\nAnd my action is Action: SELECT\nValue: XL"}]}, {"id": "mind2web_3160", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_f7ef0388-f470-4a60-8d1d-a720b444c577.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.049, 0.517, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3161", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_bba1f237-a046-46b9-b0f3-96dd331c620e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[searchbox] Search by city... 
-> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.801, 0.661, 0.822] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3162", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_8e52097f-9096-4b87-9a14-afec783592f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.839, 0.012, 0.858, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3163", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_d5aa4626-0afd-483e-adcb-bb722903ce10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings\n[span] diamond stud earrings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.181, 0.385, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3164", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_96c03dff-6653-4a1f-8dc9-e88932cd6e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK\n[div] Europe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.011, 0.781, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3165", "image": {"bytes": "", "path": "./images/06ed65ba-d2d3-4114-a67f-4622ec357aa3_bb95cc56-dd2c-4f80-9154-2b7fc0e2737c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the empire builder route in West US part of heartland and check the Minnesota stop.\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[polygon] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.392, 0.609, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3166", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_29bbfb84-2c18-4a74-a208-f68abf1d3f48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.215, 0.782, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3167", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_a7534242-8fff-4286-9a78-7289a2e16c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.012, 0.781, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3168", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_44857629-fc4a-44e4-873c-080e42c7c79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.15, 0.048, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3169", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_88dec089-2b92-494a-b781-c7f3e9cafe3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.259, 0.47, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3170", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_1a0e511f-4135-4bfb-8e74-71286dd71adf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK\n[checkbox] PURPLE -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.694, 0.491, 0.738] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3171", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_e11f7437-a42d-4e13-8ba8-9dd466485e7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 
21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.239, 0.265, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3172", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_5c4ef2f2-8851-483b-9f3e-c966e222ae8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.077, 0.617, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3173", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1e1c4337-d331-40e4-81fc-395b7c639757.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.54, 0.159, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3174", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_65752882-a6ba-4e9f-9b03-278bfeabdf73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.157, 0.697, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3175", "image": {"bytes": "", "path": "./images/c9215395-70cd-4da5-af99-9ba36cde858a_352d4ed9-383d-4f19-90ac-4167fe22c6e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trailer for a new winter show from HBO.\nPrevious actions:\n[link] Recommendations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.129, 0.851, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3176", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5a4b0241-e72e-4a4e-abe9-afcd776fa96b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.339, 0.595, 0.385] in the image\nAnd my action is Action: TYPE\nValue: 10005"}]}, {"id": "mind2web_3177", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_eaf630ca-a28b-46b3-8f1b-a3b32cfb073f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.248, 0.916, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3178", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_8840e68d-babf-4527-95cb-df13c183703e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[button] Products -> CLICK\n[label] Kids -> CLICK\n[label] Maternity -> CLICK\n[button] Store type -> CLICK\n[label] Large store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.978, 0.389, 0.997, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3179", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_3f6c2c3c-bb4e-4a97-93a6-670b449ee82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\n[textbox] Search TV Shows and Movies... 
-> TYPE: Titanic\n[div] Titanic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.151, 0.281, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3180", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_fd1d1cfe-2054-45fe-9470-701b302cc200.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.07, 0.902, 0.101] in the image\nAnd my action is Action: SELECT\nValue: 1 Guest"}]}, {"id": "mind2web_3181", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_e12c9c4e-c1bc-4f69-9d7c-fce2c1ca59c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.135, 0.196, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3182", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_2a36bfba-4c63-4682-8629-38002691467e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.058, 0.482, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3183", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_89d607c5-b591-4816-a2cd-068640e4e281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 66 -> CLICK\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.096, 0.807, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3184", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_5234c799-fd49-4103-8be7-e15a1bcfd84c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Divorce -> CLICK\n[button] Apply Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.309, 0.372, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3185", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_d204295e-0a07-4ad7-8dd2-92287300ce28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.194, 0.335, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3186", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_ef9f87aa-ac59-4820-9149-5dbd1c644beb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.564, 0.298, 0.576] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3187", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_1f65089a-5b26-4f98-a884-82c44e2cc83a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.144, 0.536, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3188", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_463a2f6e-e1fa-42fe-beb0-1e4fbe74ac51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.426, 0.132, 0.494, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3189", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_3b6fa8c6-be91-439a-b3bf-004e9f5da22f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.127, 0.326, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3190", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_202bd5cb-530f-45b1-8674-5aa0ff9b3e0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.346, 0.438, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3191", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_047beaf4-32d6-4503-ab8c-605d51ef5049.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.347, 0.174, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3192", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_f6c8fe1f-3ce7-4cb5-aac4-3843d4af5920.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.235, 0.472, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3193", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_b44807ac-6f07-474c-b99f-13d1a3841f1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.177, 0.498, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3194", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_841d4e1a-07cf-405d-97d3-771bfc9bd3fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... 
-> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK\n[select] All -> SELECT: Ages 3-5 (31)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.238, 0.196, 0.256] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_3195", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_2d372e31-24cb-41f2-8bfe-95836f933805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK\n[combobox] Vaccination status Vaccination status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.218, 0.897, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3196", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_12883b19-0053-43e8-9cfc-2b87ef699e9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.153, 0.234, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3197", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_c7d12711-12a8-4053-b048-362c7133caf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.608, 0.555, 0.626] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3198", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_5ebf6d77-1802-40f6-a790-1445f8f6ddca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.597, 0.691, 0.62] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3199", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_81552843-1039-466f-8d45-f68f83177b73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.187, 0.085, 0.196] in the image\nAnd my action is Action: TYPE\nValue: Nintendo Switch"}]}, {"id": "mind2web_3200", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_8e5bdf6e-9a87-406b-b130-634faa438a4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK\n[button] Search -> CLICK\n[button] Economy cars 5\u00a0Seats 1 Large bag 1 Small bag From ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.214, 0.768, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3201", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_551a50ba-12b7-47fe-843b-b62606544767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.0, 0.44, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3202", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_55038342-45e8-4973-a605-cadf080c5785.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.514, 0.539, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3203", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_a2674f77-6ce4-4d25-a6c9-9c5dbbecd99a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.111, 0.615, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3204", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_9998f7f2-76a0-4860-8f9c-bd56a2bccaf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.342, 0.037, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3205", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_3d571853-dba8-4f55-a5ec-afcb5b710d90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.082, 0.567, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3206", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_24c1b90a-2057-4926-9fac-ee342f7d7299.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.393, 0.664, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3207", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_42a254f0-62bd-4b7c-b209-b0dd924e05d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.234, 0.868, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3208", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_ea8a1212-eff4-48b1-9b49-1c10cd79ec35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.273, 0.181, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3209", "image": {"bytes": "", "path": "./images/04782cf5-acdf-4a9e-b3a6-4b51a18e5c28_d55856ae-eeb8-4e08-8c7e-9a4b384ab9f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an hourly parking in New York City of lowest price.\nPrevious actions:\n[textbox] Search for parking -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.124, 0.914, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3210", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_6a440fc3-e9d3-4292-a5b1-1109388f3dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3211", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_43acb344-07a8-4519-92c8-32d404a0ae8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a supporting role in 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.009, 0.1, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3212", "image": {"bytes": "", "path": "./images/cd5d03cc-24a8-4284-ac43-b38579f416b1_e8afff4d-d27f-4877-99d3-bc44942bf223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flights between Abidjan and Accra.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.199, 0.954, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3213", "image": {"bytes": "", "path": "./images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_bd575876-47dc-4259-ba68-82544768d412.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the most popular photos of the Adam A-500 model aircraft.\nPrevious actions:\n[span] Community -> HOVER\n[link] Highest Ranked -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.143, 0.269, 0.156] in the image\nAnd my action is Action: SELECT\nValue: Adam A-500 (twin-piston) (26)"}]}, {"id": "mind2web_3214", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_da70fb73-dad9-4999-beb2-e770abc20a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine tasting for 4 guests on April 15, 10 am in an outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.334, 0.306, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3215", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_aa07204b-5ca1-4418-b291-5e699c085977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: vintage clothing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.203, 0.643, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3216", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_c2d92dd0-dd6a-4957-abed-473e7a82bd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.147, 0.218, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3217", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_e07b8f59-1316-4fc5-b5de-cd8befb4cbd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[svg] -> CLICK\n[div] RawElegant.Life -> CLICK\n[div] Envy Yourself With Beauty Makeup MasterClass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.24, 0.575, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3218", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_3c351c6f-edb4-43b4-89ed-cbabebaf4917.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3219", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_65225c2f-524a-4d2f-b2f1-277e85b90696.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.305, 0.491, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3220", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_1619fb2d-5d4d-45d4-b5af-0d2853002d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.86, 0.944, 0.96, 0.985] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3221", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_45fd9710-3ac8-4e4e-beb0-e624ac8a3e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.151, 0.953, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3222", "image": {"bytes": "", "path": "./images/6a326478-2a1b-4e47-b298-53f3ac12ed51_51cc3205-af3d-43a8-b777-60200db8d366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with information about luggage and what to bring.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.121, 0.286, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3223", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_e6dd4954-4b1d-4f56-8049-0ff9698e56af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.029, 0.309, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3224", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_a054f615-6d0c-41cc-9d18-1b7a88647a37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.043, 0.036, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3225", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c55541b2-0344-486c-b183-b3494993e838.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.092, 0.494, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3226", "image": {"bytes": "", "path": "./images/9b03e9a1-39bb-48d5-a33a-a2b05a6eb379_e152cb15-af77-4b96-b03f-c5feb507de22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for upcoming MLB games taking place on Sunday, 3/19.\nPrevious actions:\n[link] MLB . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.093, 0.218, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3227", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_fe72704d-6041-4c96-9ac4-dabec16780df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[textbox] Search events -> TYPE: pet festival\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.126, 0.581, 0.159] in the image\nAnd my action is Action: TYPE\nValue: portland"}]}, {"id": "mind2web_3228", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9c008888-c54b-42fc-958e-a7023fea0765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.513, 0.179, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3229", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_b3ae5b47-1de0-443b-a314-b300e04cd29b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single season.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.021, 0.353, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3230", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_7f4f3081-4f9d-4238-83ec-87f4b992e5f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)\n[select] All -> SELECT: Hardback (13,067)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.562, 0.196, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3231", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b45b2866-9761-4b0b-8e03-6b4264113621.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.105, 0.852, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3232", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_ec119d53-7ced-4964-8fb0-95482559b137.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.435, 0.226, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3233", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_fa95b8e9-d45b-456f-b16e-73f81d5dfb59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessories.\nPrevious actions:\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.176, 0.691, 0.214] in the image\nAnd my action is Action: TYPE\nValue: Hello World"}]}, {"id": "mind2web_3234", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_695f1ad2-16c0-4655-b15d-ad6a894df41a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. 
-> TYPE: san antonio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.25, 0.385, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3235", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_7da9920d-d511-4a49-a6d4-482753f64cff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.312, 0.797, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3236", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_f6f37749-676d-4faa-8a44-22139190c76b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.162, 0.285, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3237", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_b8eff4ca-52f9-4a19-af84-cfa36e4a376b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] To -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 20 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.202, 0.408, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3238", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_3ff7c153-dc10-47fc-9bb3-1c5efd5307f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: Mexico\n[div] Mexico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.108, 0.964, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3239", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c767c653-e622-4df9-8b1f-a83eb531e1fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.238, 0.773, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3240", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1a5ad52f-9758-4c99-992c-7f1ac68ef8bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.418, 0.897, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3241", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a03147e4-838d-4a8e-a343-f72f05555caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.4, 0.495, 0.526, 0.562] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3242", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_174d6b34-d5d3-4c75-907c-9547ae8607cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.111, 0.819, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3243", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_f0255e45-ed88-45fb-bb17-d493aabf1d30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.171, 0.086, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3244", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_be766567-c42c-4657-8021-c37a5151f283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\n[link] Auto Services -> HOVER\n[span] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.026, 0.564, 0.042] in the image\nAnd my action is Action: TYPE\nValue: CALIFORNIA"}]}, {"id": "mind2web_3245", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_d9a196d8-e11e-4d85-aab2-89aa169ebc1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessories.\nPrevious actions:\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.65, 0.938, 0.712] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3246", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_277dd183-dd49-4294-8b98-5da138f0cc1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Romantic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.43, 0.772, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3247", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_312e9b1f-8d64-43f4-83e3-eb7d0b715739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana\n[link] Search for \u201cNirvana\u201d -> CLICK\n[link] Nirvana -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.288, 0.171, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3248", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_ef49383f-479e-4814-b8f5-f010ee86a655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.391, 0.285, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3249", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f0bdf42b-3c1b-4d04-a6be-4abab3133890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.118, 0.487, 0.137] in the image\nAnd my action is Action: TYPE\nValue: Texas city"}]}, {"id": "mind2web_3250", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_6c9584a6-5745-4585-9d4c-56a9d0d4a24f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[span] London -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.18, 0.194, 0.198] in the image\nAnd my action is Action: SELECT\nValue: Leaving at"}]}, {"id": "mind2web_3251", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_d5ea9d5f-8db5-43db-812e-7810f8c7a683.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Chicago Bulls\n[div] Chicago Bulls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.051, 0.601, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3252", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_adb826f9-2b2e-4ed0-979a-348c5e3bb7bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\n[link] BABY -> HOVER\n[link] Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.161, 0.481, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3253", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_463f5123-4572-4b78-8a57-faf3f84441a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Vinyl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.108, 0.898, 0.122] in the image\nAnd my action is Action: SELECT\nValue: Price Lowest"}]}, {"id": "mind2web_3254", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_68a47f57-3330-41da-9000-1e28dd7d0151.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Guest Rating -> CLICK\n[link] Pets welcome (118) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.169, 0.38, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3255", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_6a7d4322-2bb4-4427-a64a-a4e4e9ef5731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[gridcell] Sun May 07 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.284, 0.142, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3256", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_f12955aa-647c-4dff-af41-24a5357f42df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.086, 0.426, 0.13] in the image\nAnd my action is Action: TYPE\nValue: New York, United States, 100"}]}, {"id": "mind2web_3257", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_db3a2cca-7d23-48a8-a3c6-7dd991378b98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.263, 0.34, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3258", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_ec48b801-b658-459c-8b45-d2e9ff9f4238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings\n[span] diamond stud earrings -> CLICK\n[img] 10k Gold 1 Carat T.W. Black Diamond Stud Earrings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.34, 0.931, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3259", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_a6d9b3ec-76dc-4e1e-b4cb-d6b607719bc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK\n[link] Neutral (7) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.126, 0.974, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3260", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_ddb033d7-d0ba-4fd3-a207-ab678a2e12eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK\n[input] -> TYPE: 1648926800"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.342, 0.969, 0.374] in the image\nAnd my action is Action: SELECT\nValue: Hindi"}]}, {"id": "mind2web_3261", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_ed9c6c23-5d98-4945-a9c7-aae2cc041574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.05, 0.674, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3262", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_7a02fae6-8fcb-46dc-b718-4bfdd02729dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.204, 0.375, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3263", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_83c4ca2d-e6f6-4dd3-8981-904229809643.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.48, 0.141, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3264", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_e888c578-870d-4ede-873d-2d09d7cdc189.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000\n[textbox] Email Address -> TYPE: RA@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.742, 0.391, 0.755] in the image\nAnd my action is Action: TYPE\nValue: 90001"}]}, {"id": "mind2web_3265", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_05fbfb1b-ca54-4bcf-afa1-df49a7a6b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.304, 0.406, 0.387, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3266", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_7d8e5218-f5aa-40c1-a37a-9b84def7e069.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.065, 0.443, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3267", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_464d3892-61e6-4d36-81fa-94b33577eda9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.007, 0.256, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3268", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a7343102-879a-40e8-8b57-6b2b96ee2dab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.197, 0.3, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3269", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_6743a749-1918-4fdc-8ec7-6d0319125849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.396, 0.272, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3270", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_94af0adc-9075-48cf-a933-f0fad4d2a873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] To -> CLICK\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.242, 0.256, 0.264, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3271", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_0191e556-51aa-4851-928d-12a02ca30a6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[div] Anywhere -> CLICK\n[textbox] When? 
-> CLICK\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.171, 0.662, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3272", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_43e6acc3-98fb-4911-8cc5-128d2ce4c14f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin D -> CLICK\n[checkbox] Buy 1, Get 1 Free 33 items, On Sale list 3 items -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.613, 0.397, 0.682, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3273", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_2bfbc791-a24f-4b42-934e-a5d5e7ecd8cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.23, 0.209, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3274", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_76bbcf6b-64fb-45de-be5d-ade45a0b2247.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 1234567890\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.222, 0.94, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3275", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_9b26383c-fd9f-4719-80be-5cab61f9a8b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK\n[link] Oak -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.253, 0.767, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3276", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_baefd0f9-5e71-4c4f-9263-83765e760b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.168, 0.905, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3277", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_18f385db-5b2a-4643-aead-754c6836369e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.23, 0.048, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3278", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_993af1e2-ecb0-4a4d-bf38-6ba35b599c98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.25, 0.187, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3279", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_49ed6f6a-ecc6-4c70-986f-d9504322827f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK\n[link] OPEN 24 Hours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.118, 0.672, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3280", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ceba3c43-95bd-4b82-9110-676cb466aab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK\n[option] US, CA, San Francisco -> CLICK\n[link] Manager, Field Sales - West -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.3, 0.131, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3281", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_bd722609-34f1-4a98-bab9-25999877944e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[div] Soccer -> CLICK\n[link] La Liga La Liga -> CLICK\n[div] Real Madrid -> CLICK\n[heading] ROSTER -> CLICK\n[heading] Vinicius Junior -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.556, 0.063, 0.574, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3282", "image": {"bytes": "", "path": "./images/f754d919-2191-464e-a407-0f3da9409081_fe05d80f-01d3-43b5-9568-9204d53f7100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See today's deals on video games and find the best deal on the lowest-priced mouse.\nPrevious actions:\n[link] Video Games -> CLICK\n[link] Today's Deals -> CLICK\n[link] PC -> CLICK\n[link] Accessories -> CLICK\n[link] Gaming Mice -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.072, 0.968, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3283", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_f9091185-4976-4ac0-bc2d-a85a3143a6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: smartwatch\n[button] smartwatches -> CLICK\n[i] -> CLICK\n[img] Sponsored Ad - SKG V9C Smart Watch for Men Women, ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.236, 0.838, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3284", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_4c768a53-5540-42b6-8e12-ea72c58a1908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.434, 0.192, 0.454] in the image\nAnd my action is Action: TYPE\nValue: 70"}]}, {"id": "mind2web_3285", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_c8a0567f-38f0-4def-b8fe-9a7508661566.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.416, 0.43, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3286", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_ffa7efe9-9e11-490f-a776-0d1999334fcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Hot Dogs\n[span] Hot Dogs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.095, 0.313, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3287", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_34eccecf-fd3e-43ca-965c-98d3be310a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.133, 0.734, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3288", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_1e0f3579-bba4-479f-a9d0-2b7660f62767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport\n[heading] Chicago O'Hare International Airport -> CLICK\n[textbox] Drop-off location -> TYPE: 123 Main St, West Chicago, IL\n[heading] 123 Main St -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.294, 0.641, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3289", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_7d9e6c1b-62ca-4ac9-9847-158fdce932e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.126, 0.846, 0.164] in the image\nAnd my action is Action: SELECT\nValue: 1 00 PM"}]}, {"id": "mind2web_3290", "image": {"bytes": "", "path": "./images/d637c171-dc6e-4a4e-a162-9c230e822932_9053f0e2-da05-4721-87b5-13edf923052b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show popular news which is at number one in comics.\nPrevious actions:\n[button] News -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.418, 0.216, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3291", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_986fa986-16ec-4b2a-ab72-03f8ab10bec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.139, 0.84, 0.17] in the image\nAnd my action is Action: TYPE\nValue: BIRMINGHAM"}]}, {"id": "mind2web_3292", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_25e05af9-37da-4d90-b855-8ab0b7020188.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.029, 0.158, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3293", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_2f3de943-1b23-4176-af7f-423c24803f39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK\n[span] 8 -> CLICK\n[span] SEARCH FLIGHTS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.499, 0.151, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3294", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_ba013772-a229-43d2-881f-3b1edf1d1cf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK\n[link] Canada -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.256, 0.312, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3295", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_8fd0040d-0a18-4fb3-8f00-7426b7c53bd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.46, 0.912, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3296", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_87981f4e-2b84-4c8e-a7ab-0b3a2813ba20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.157, 0.257, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3297", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_410c37f5-5711-41ac-893c-9b0a78045d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.181, 0.969, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3298", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_7e920296-08ae-40e7-a085-ce00bbd794e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.237, 0.273, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3299", "image": {"bytes": "", "path": "./images/b4a2fc25-a7fe-4e31-beae-b31f2ef8cf3e_9ed5695b-9888-4996-8d8d-fdc59e8b84ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of games I've played recently.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.0, 0.519, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3300", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_2d83fc2f-5d8e-45ab-8a78-7b0c8705d37d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: CHIANTI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.145, 0.192, 0.174] in the image\nAnd my action is Action: SELECT\nValue: 4 Guests"}]}, {"id": "mind2web_3301", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_c0dc76c0-098f-41f7-8e9b-2a548ded774b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[heading] Used 2000 Ford Mustang GT -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.228, 0.838, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3302", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_b402a6dc-c2dc-4d0b-86d2-7ee0f55a3275.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.089, 0.442, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3303", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_84c41c48-86b7-4420-8cff-e286908d36c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.245, 0.153, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3304", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_59b5850a-eab8-437d-9b9b-571a2835604e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.153, 0.467, 0.194] in the image\nAnd my action is Action: SELECT\nValue: 1 Room"}]}, {"id": "mind2web_3305", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_82f690c6-2231-4e3f-b030-7f3f205d1e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.757, 0.113, 0.772] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3306", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_e1d28f7d-0da8-437d-aa75-220acdd712c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.589, 0.906, 0.608] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3307", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_97b19ffb-e2c9-4bab-97f3-735bdb136ad7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK\n[textbox] When? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.134, 0.81, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3308", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_2d4ff701-58d7-4a52-b443-9927b918a992.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.014, 0.284, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3309", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_c748a44b-f6e8-496a-be28-c3b90ad70a2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.33, 0.236, 0.373, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3310", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_05076a4c-ba04-4130-b156-b64d7acf1594.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK\n[checkbox] 30 March 2023 -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.54, 0.331, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3311", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4cc3fe67-f860-4993-a050-ee0e7f64b481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.168, 0.32, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3312", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_73be4bdc-72e9-42d5-b42e-b5f9ae0ab90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[gridcell] Wed Jul 05 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.33, 0.3, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3313", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_3075d6ef-74ac-4361-87f7-817f02e4a80e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.145, 0.36, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3314", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_2e444d2d-3922-45da-99f8-1e1843d6dcdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.235, 0.848, 0.261] in the image\nAnd my action is Action: SELECT\nValue: Walking"}]}, {"id": "mind2web_3315", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_ad13c7b6-6d05-41fc-a140-cec783e3ca92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK\n[div] Learn More -> CLICK\n[div] Learn More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.668, 0.268, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3316", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_874d2fc1-2cd7-41f9-9631-22c7542480fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. -> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.401, 0.492, 0.434] in the image\nAnd my action is Action: SELECT\nValue: Adult (16-64)"}]}, {"id": "mind2web_3317", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_2ed256fb-5cb4-47e1-8d8b-49003507da7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.278, 0.359, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3318", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_dd77262b-0947-4de1-b81d-eb2b77b2382a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.264, 0.562, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3319", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_09ae59d8-4c49-4242-b028-24d761e54b7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Popular Upcoming -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.368, 0.613, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3320", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_a55fd649-8057-4b3b-877c-bbde4b4ec8a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.32, 0.5, 0.355] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_3321", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_5359e7ef-c441-4c68-a3f9-d54ea991b51f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.119, 0.719, 0.163] in the image\nAnd my action is Action: TYPE\nValue: CHICAGO"}]}, {"id": "mind2web_3322", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_5ee0bcc8-a842-47cc-a02d-2d0b9b7b3f3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.709, 0.473, 0.739] in the image\nAnd my action is Action: TYPE\nValue: 30000"}]}, {"id": "mind2web_3323", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_51737bb7-36f6-4b37-a121-8d829c2c17ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... 
-> TYPE: New York\n[option] New York, NY -> CLICK\n[img] Concerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.725, 0.264, 0.866] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3324", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_bf77953e-e135-4403-9b3d-494a7bf161f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\n[link] Schedule -> CLICK\n[link] Team Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.543, 0.672, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3325", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_bfe95a78-0720-4801-b97e-f4661a8e6de7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK\n[label] Private Tour -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.246, 0.954, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3326", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_e04c1a00-5b98-4724-8d98-7bc6a2fe9241.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.247, 0.193, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3327", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_b327b28a-2cc9-4315-9cd0-1545d21d74f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.042, 0.378, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3328", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_a2035429-c51b-4259-b2d5-9eb766c20bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.105, 0.314, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3329", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_490f9126-f087-4622-9c3c-05e9efc3aaf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.237, 0.651, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3330", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_aa0b3bef-232f-4fda-aea1-7ad5aa44e543.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[button] Next -> CLICK\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.518, 0.705, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3331", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_ee082227-512a-4438-bed0-43fcedf4d1ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.641, 0.28, 0.875, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3332", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_4687dd1f-40b5-4b03-bf86-49541ae51d01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... -> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.373, 0.455, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3333", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_80df35d3-a409-4097-b3c7-30f24edbb24c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.33, 0.341, 0.361, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3334", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_13d235dc-c6ce-45c6-a075-8da1683281ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.208, 0.331, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3335", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_35bc428e-f0a7-4e6c-a921-213d082a151f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[textbox] Where to? 
-> TYPE: SEOUL\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.829, 0.306, 0.858, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3336", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e672afc1-9115-45a5-acad-08c5dfcab90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.169, 0.352, 0.192] in the image\nAnd my action is Action: SELECT\nValue: 04 00 PM"}]}, {"id": "mind2web_3337", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9a099213-7601-400e-b8b7-37a54615abc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.0, 0.816, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3338", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_f68ffe3e-f73c-4de7-8726-93644fdb6ba2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall\n[span] South Allport Street, Chicago, IL, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3339", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_f8873fc6-9f55-4338-9b3e-08a10fba7047.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.01, 0.651, 0.043] in the image\nAnd my action is Action: TYPE\nValue: COMFORTER"}]}, {"id": "mind2web_3340", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_f949bab4-d297-47ca-926b-32bc1573d765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\n[link] Store Locator -> CLICK\n[input] -> CLICK\n[option] WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.312, 0.412, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3341", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_3161aa29-78c0-4ab3-b776-10d894cf75fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.247, 0.16, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3342", "image": {"bytes": "", "path": "./images/92b51ef3-7b37-4423-aa6b-e8fb6bf32156_ff1358c0-bfa5-4b15-aee0-ad09119d4bd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for news about the latest news about Lebron James' injury and share the article on twitter.\nPrevious actions:\n[combobox] Search query -> TYPE: lebron james"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.058, 0.259, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3343", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_27999ff5-6e84-4a07-995a-919b679d68a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.603, 0.62, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3344", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_961a671e-f10e-4dc0-bee9-429a8f389b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK\n[span] EXPLORE DINING -> CLICK\n[radio] Filter group Ship: Carnival Breeze -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.473, 0.055, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3345", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_c58a3392-4056-47f7-b1e6-16ecb0b2cc8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.418, 0.095, 0.466, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3346", "image": {"bytes": "", "path": "./images/44284a24-b3de-44ef-bcfc-abf57c3f791a_0d85fdcc-ca3f-4b80-97ca-0d509c03ccc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the status of flight #039028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.021, 0.572, 0.031] in the image\nAnd my action is Action: TYPE\nValue: 039028"}]}, {"id": "mind2web_3347", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_342fe554-d039-4d10-a909-323c6af8fead.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.378, 0.242, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3348", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_92c548c1-e0d2-4990-a88d-93230c0b8c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK\n[combobox] To\u00a0 -> TYPE: North Station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.536, 0.161, 0.812, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3349", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_9ca6d59b-86bc-45b7-8234-669bc1d307a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.711, 0.192, 0.738] in the image\nAnd my action is Action: TYPE\nValue: 700"}]}, {"id": "mind2web_3350", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_69947f74-50aa-4d03-ae09-eead95ecefe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK\n[textbox] Last name -> TYPE: Atas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.415, 0.193, 0.424] in the image\nAnd my action is Action: TYPE\nValue: 1000001"}]}, {"id": "mind2web_3351", "image": {"bytes": "", "path": "./images/9f57055d-c269-47d7-99be-3525d725439e_dba417cb-26b6-43b5-a275-b52134a8df8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the best odds to win the NBA title.\nPrevious actions:\n[link] NBA . -> HOVER\n[link] Odds . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.4, 0.537, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3352", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_c466b584-1fa5-4720-a342-51bc560b65c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Explore Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.155, 0.172, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3353", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_8d663377-4e7c-4493-be53-0c5f14abeae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.06, 0.286, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3354", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_4e58e6b2-b337-42bf-8a9e-f516499a1f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK\n[link] Under $35.00 -> CLICK\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.118, 0.923, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3355", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_a046b31a-ea81-4dc4-9aac-e65fe81da727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[textbox] US City,State or Zip Code -> TYPE: 10001\n[textbox] mm/dd/yyyy -> CLICK\n[link] 23 -> CLICK\n[input] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.375, 0.352, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3356", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_9585a0e8-17ac-4cc3-9f8e-3616fc8ef354.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\n[combobox] Search products and services -> TYPE: dayquil\n[button] Submit search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.221, 0.33, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3357", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_9e80dd9f-5216-4dc7-8aeb-c7dc433119af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.144, 0.213, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3358", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_6e7d4501-298a-4ea9-a266-e9ae3bc160b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.743, 0.089, 0.751] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3359", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_66558d92-b0c7-4478-a66a-dcba598144ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... 
-> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.318, 0.24, 0.352] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_3360", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_b99c5581-8a56-4bd7-bbe4-782795ebf93c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: Dallas Love Field"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.15, 0.914, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3361", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_4ee08fe7-5ec5-46d5-8ccf-d41e84d607b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK\n[textbox] Last name -> TYPE: Atas\n[label] Confirmation number -> TYPE: 1000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.289, 0.201, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3362", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_da230e9f-977e-4972-bd15-c41c61617881.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK\n[button] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.434, 0.104, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3363", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_50186aaf-d2b7-49ce-91c2-7bf37430ea50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.173, 0.546, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3364", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f20ca446-98cb-4ce8-8ca2-96c8fb4fbb69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK\n[button] Pick-up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.125, 0.495, 0.157] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_3365", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_97db88b1-dff6-4fa4-a01d-b6e189ada5ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK\n[span] From $86 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.176, 0.308, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3366", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5224c061-6b17-495f-981e-d40d5de3af4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.169, 0.439, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3367", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_1b347a24-e015-4d1e-bce1-d999b5d80448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.061, 0.563, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3368", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_9d8c2c30-a449-44bf-8f1d-120f3a8057f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.08, 0.664, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3369", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_4fc388d4-400a-4097-86db-59e5f812f69e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.022, 0.564, 0.034] in the image\nAnd my action is Action: TYPE\nValue: Greenville"}]}, {"id": "mind2web_3370", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_7e4cd11c-208f-4b21-a9d4-0bd61c860ab8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.16, 0.453, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3371", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_94e763d4-5367-4cfe-8d21-24e4c5eeb937.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. 
It's available. -> CLICK\n[button] 1\u00a0Room, 2\u00a0Guests Click to edit rooms and guests -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.194, 0.677, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3372", "image": {"bytes": "", "path": "./images/b1055658-7993-448c-9af6-a722cf69ff97_53677991-4d59-4105-b3ad-6896b77fecba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for the next restaurant-related event, only 1 ticket is needed.\nPrevious actions:\n[link] See more venues in the Special Events & Experi... -> CLICK\n[link] HAPPY HOUR BURGER, Aria, Thursday, April 13, 2023 ... -> CLICK\n[combobox] 2 tickets for HAPPY HOUR BURGER -> SELECT: 1 Ticket\n[button] 5:00 PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.348, 0.523, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3373", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_dbcd3c77-a7bb-42ba-bf7f-c2d693cede67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.263, 0.459, 0.293] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_3374", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_8acda36c-321f-4831-af11-490704136e6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.21, 0.705, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3375", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_ff0d9b95-71ea-4ffb-ba47-f3b317d24f09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.123, 0.969, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3376", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_1b1deeda-16e7-4c5c-b287-5fb707328edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] English -> CLICK\n[link] Last 90 days -> CLICK\n[span] Featured -> CLICK\n[option] Publication Date -> CLICK\n[img] Records of the Medieval Sword -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.376, 0.985, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3377", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_5b577f52-8708-437d-956a-f196bb1aed0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.191, 0.923, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3378", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_d26cb28b-0f54-4b70-bad7-c7e384c799c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.14, 0.159, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3379", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_15956c00-b71f-4623-834e-67fb0a4e40c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.202, 0.359, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3380", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_2f2fab58-539a-48d5-acac-1c7f8dcd741a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] THEATRE -> CLICK\n[link] Off-Broadway -> CLICK\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.318, 0.636, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3381", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_1c085040-b388-4cc6-988e-cda1e7b83177.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.385, 0.169, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3382", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_cbd1bec1-9482-4cc3-87d6-74e3d455da4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK\n[div] Tomorrow -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.148, 0.607, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3383", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_7a2802b1-aaa1-4e6c-82da-7ae9fae081b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.144, 0.163, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3384", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_7973940d-9d78-4a0a-97f7-b98b5c80ae7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.287, 0.359, 0.319] in the image\nAnd my action is Action: TYPE\nValue: Queensboro plaza"}]}, {"id": "mind2web_3385", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_ad212d0b-ad5b-46ca-a911-3fe3755efd13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.287, 0.194, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3386", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_d955349c-a5f7-4faf-90cb-4503103bb09d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.606, 0.918, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3387", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_02970790-c898-4ec5-929a-dab35d6b4e31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK\n[link] Visit our cruise deals page to view your offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.422, 0.556, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3388", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_7c62b124-6c92-4f88-acc4-c0200e2706f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK\n[button] View upcoming departures -> CLICK\n[combobox] Choose a direction -> SELECT: SOUTHBOUND Forest Hills"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.348, 0.695, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3389", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_2a1a10d0-4641-4d0a-85fd-024a5dbfef2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.487, 0.258, 0.518, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3390", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_d882dd9d-3efe-420a-b0e0-ef2d36f1f947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! 
-> CLICK\n[img] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.205, 0.266, 0.227] in the image\nAnd my action is Action: SELECT\nValue: Honda"}]}, {"id": "mind2web_3391", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_3e785891-d6b3-42d4-ba08-316207cd0aeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.455, 0.226, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3392", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_fa257e3f-253e-412f-8144-5d901eb5b29c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.053, 0.345, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3393", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_4b6c0d2f-2736-4ca7-890c-7b1a6b188e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Taylor Swift"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.014, 0.553, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3394", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_6dd86b17-3d5a-408f-9230-2a4683dde8cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK\n[link] Documentary -> CLICK\n[link] User Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.228, 0.301, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3395", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_9b411bb7-0ebe-45e4-a378-0eee5a93e61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.454, 0.48, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3396", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_846f8e5c-0ed6-4857-8e54-b61d65a4f687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.428, 0.118, 0.533, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3397", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_42038435-d0d2-408c-9edd-8c6b49b062b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.183, 0.611, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3398", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_15ac7d92-b5d1-4f92-8da2-54c3dd71a321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests\n[link] Este -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.358, 0.581, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3399", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_582334ed-53ce-436c-86f1-03525500363c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.708, 0.363, 0.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3400", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_7e0ec349-8a65-494d-90cb-d3ecee8d23f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.077, 0.265, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3401", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_2ef33feb-d103-4283-8d88-68fb0ca9c9c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.192, 0.096, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3402", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_223e0643-acb4-4c02-ac89-012a32461ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.141, 0.7, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3403", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_eb4e73a1-9b97-4d34-ab8b-df4419a6dbc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.185, 0.813, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3404", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_fab6b0d8-0467-4a98-97f4-e43c25baa36b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[img] Women's Sonoma Goods For Life\u00ae Everyday V-Neck Tee -> CLICK\n[textbox] Product quantity -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.768, 0.359, 0.931, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3405", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_75619055-a693-4aca-9b29-0f8fe7efc2db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.528, 0.509, 0.707, 0.519] in the image\nAnd my action is Action: TYPE\nValue: 1300"}]}, {"id": "mind2web_3406", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_8fda4ce4-72f7-453e-bc1b-0f357512edd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\n[combobox] Select Make -> SELECT: Tesla\n[textbox] Zip -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.169, 0.748, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3407", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_8668151c-111b-4824-8572-dd3adc202437.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.329, 0.595, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3408", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_b5462d25-73ae-4282-8023-716e111a610d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.25, 0.636, 0.27] in the image\nAnd my action is Action: TYPE\nValue: lin.lon@gmail.com"}]}, {"id": "mind2web_3409", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_c343cfea-aace-4a34-bd4d-b2bb679d74b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.144, 0.936, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3410", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_e3069f03-e2a7-49e3-9c70-b9538cfd103a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.272, 0.287, 0.303] in the image\nAnd my action is Action: TYPE\nValue: 04/01/2023"}]}, {"id": "mind2web_3411", "image": {"bytes": "", "path": "./images/96fb7e5d-08b1-4def-a6f8-6ee81055d944_8873a220-4c4a-4217-9046-012a50badcdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Gingerbread cakes recipe and add ingredients to cart.\nPrevious actions:\n[textbox] Search -> TYPE: Gingerbread cakes\n[link] gingerbread cakes -> CLICK\n[button] Recipes -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.52, 0.969, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3412", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_c488f9fc-084f-4e7d-9c02-41933cf52026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK\n[link] Crossover vehicle icon Crossovers -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.335, 0.253, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3413", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_cf1fff18-833a-4080-a441-29c38bb95682.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.24, 0.415, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3414", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_798fb5b9-2d7a-463d-acac-2bef7f223623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.338, 0.514, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3415", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ff9c2e42-59c7-4b21-8080-d0631b14b481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.752, 0.227, 0.778] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3416", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_3a7fbb2d-26c1-497b-9e24-7a7d13a5d5ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.098, 0.713, 0.122] in the image\nAnd my action is Action: TYPE\nValue: florida"}]}, {"id": "mind2web_3417", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_b27add92-735a-4aad-a91d-1a8ca179d35e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.128, 0.857, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3418", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_0aae2ff1-b036-4560-9beb-701ce59d4e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.194, 0.325, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3419", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_0900afdc-1ed0-4605-857a-0d5f0a186230.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.331, 0.906, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3420", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_1e44ded7-35bd-463e-a135-7e2098862504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.408, 0.916, 0.445] in the image\nAnd my action is Action: TYPE\nValue: Denise"}]}, {"id": "mind2web_3421", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_0c543e05-b392-4a39-ac27-7d2d82c1cb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\n[link] Bargain Cave -> CLICK\n[link] Men's Shoes & Boots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.198, 0.063, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3422", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_7647cc73-2926-423a-b613-cd280e8c3858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.485, 0.216, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3423", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_dd055c45-e037-4fa1-8b06-c6d60efae226.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.452, 0.829, 0.466] in the image\nAnd my action is Action: TYPE\nValue: mumbai"}]}, {"id": "mind2web_3424", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_7da23b05-7a5a-46b0-90bf-ba2e4dfa8ec9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.122, 0.942, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3425", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_65155648-ef0f-46ef-bde1-64d693369f03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.059, 0.327, 0.078] in the image\nAnd my action is Action: TYPE\nValue: BERLIN"}]}, {"id": "mind2web_3426", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_ee1a7f4e-9ba8-454a-a034-2b3c21806cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.285, 0.467, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3427", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_97a7578d-a8e2-4089-88bc-e45292ac3435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: udupi\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.504, 0.742, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3428", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_38f4cf88-6985-4927-9e8c-c6115ea700af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[button] City -> CLICK\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.851, 0.312, 0.972, 0.347] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_3429", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_d468c98a-de53-4b08-b5d4-d7fd7df98ef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City\n[div] New York City, NY -> CLICK\n[textbox] When? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.074, 0.517, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3430", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_56fa7b74-7f71-40c3-9efe-af9d76d7d282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.107, 0.421, 0.133] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_3431", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_04801367-454c-48fb-a8ac-db91f6d88bb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK\n[radio] RENEW - An Existing Account -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.431, 0.707, 0.466] in the image\nAnd my action is Action: TYPE\nValue: 1000000001"}]}, {"id": "mind2web_3432", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_c6af7067-da65-403a-be07-5e2f40406cdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.398, 0.754, 0.428] in the image\nAnd my action is Action: TYPE\nValue: sofi stadium"}]}, {"id": "mind2web_3433", "image": {"bytes": "", "path": "./images/f61456ed-3cc2-41a0-b79c-56737515fac9_73731a6c-3075-44fa-90d4-903f8fc39520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the photo gallery for Tsiakkos & Charcoal.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.038, 0.456, 0.086] in the image\nAnd my action is Action: TYPE\nValue: Tsiakkos & Charcoal"}]}, {"id": "mind2web_3434", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_fe71a180-7a37-416d-9728-db4936372cb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.675, 0.873, 0.699] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_3435", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_031cfcfd-3ab1-4519-9d37-3d418a54d4a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8\n[select] 00 -> SELECT: 37\n[select] AM -> SELECT: AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.183, 0.855, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3436", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_54031223-8306-4317-b2cd-7c63428cc122.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[p] Number of Travellers -> CLICK\n[img] -> CLICK\n[button] Check availability -> CLICK\n[img] -> CLICK\n[li] English - Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.201, 0.794, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3437", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_46828258-db63-45df-b5d6-3807c8d23840.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK\n[link] Food & Drink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.342, 0.113, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3438", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_d0d60f7e-1c65-476d-95b5-731034550fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix\n[button] Search -> CLICK\n[link] Albums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.379, 0.415, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3439", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_ae451dae-1e4a-41d5-a580-57a183968ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.144, 0.257, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3440", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_189310b5-d088-400b-a817-9a4ea975fb6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK\n[link] Soccer . -> CLICK\n[link] Premier League -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.111, 0.377, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3441", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_92f8e2ae-f711-4a6a-b624-4c42d87fb214.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.57, 0.169, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3442", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_592e633c-4a13-4a6e-9032-106326773974.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: XA1234\n[combobox] state -> SELECT: AZ"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.201, 0.746, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3443", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_1db700ef-960a-4147-b117-c0f64f18138e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.093, 0.702, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3444", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_f3d717cb-a19f-4d53-a68c-8fb2658b1294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK\n[span] From $86 -> CLICK\n[button] Low prices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.176, 0.402, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3445", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_55f1399e-7b1c-4717-b348-bec38fe194b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[span] Honolulu -> CLICK\n[checkbox] Vehicle -> CLICK\n[checkbox] Large appliances -> CLICK\n[radio] Out-of-state -> CLICK\n[button] Virtual Consultations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.143, 0.63, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3446", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_ad240481-8606-4009-a2f0-7b4766c10fae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.014, 0.328, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3447", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_3df37d9c-f0b4-46a2-b7ea-aa19650153f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK\n[searchbox] Search -> TYPE: Prometheus\n[div] Prometheus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.174, 0.671, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3448", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_943c6b5b-f363-4e61-aa72-4dbd15fe24b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[span] Dry Cleaning -> CLICK\n[textbox] Near -> TYPE: new york city\n[span] New York, NY -> CLICK\n[button] Virtual Consultations -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.164, 0.612, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3449", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_4449ebe8-3714-47ac-b6ea-becc1926ca48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. -> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1\n[combobox] Passenger 1 -> SELECT: Adult (16-64)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.6, 0.29, 0.632] in the image\nAnd my action is Action: SELECT\nValue: Lowest fare"}]}, {"id": "mind2web_3450", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_137d1a09-cb26-41c0-b266-1d77219dcd09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.121, 0.3, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3451", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6cac47b8-d3d2-47b2-a53d-0457923f7d19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.284, 0.625, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3452", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_90fb8feb-ae40-4b6a-bf30-fd8be24554ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Mar 9 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.096, 0.807, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3453", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_fecbc521-d4d5-458c-a1bd-63931a9f4f54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.197, 0.845, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3454", "image": {"bytes": "", "path": "./images/39358d9c-6db2-4662-a91e-47a416eeacf7_8720478b-0b2c-4c71-a216-68ea7cc6ec42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what deals there are for Dish Outdoor.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.023, 0.855, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3455", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_0e80c45f-23d4-40ed-b1b3-013f44b2f9d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.032, 0.029, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3456", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_68d59689-9de8-43ea-b8a8-de293b68448e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK\n[button] Brand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.08, 0.79, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3457", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_c61429c5-6d45-4632-b80b-1cfe0e7532e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] WOMEN -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.351, 0.096, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3458", "image": {"bytes": "", "path": "./images/5fb9730d-f489-4cb7-a220-d406794cef29_f4792054-15e6-43d5-a50d-aab11eba8bf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List all George Clooney movies and sort them by box office revenue\nPrevious actions:\n[textbox] Search -> TYPE: George Clooney\n[p] George Clooney -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.185, 0.589, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3459", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_47c29841-4175-4266-b729-28314be9ae13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.014, 0.369, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3460", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_bb6621bd-5dc8-433a-b6fd-aaaf91bf4d06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.605, 0.0, 0.716, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3461", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_20627442-25be-4dc3-aa4c-e36a3b8a6f3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.143, 0.964, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3462", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_0f4938d4-3d29-44c8-89bc-96b02e751dc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. 
-> TYPE: New York\n[a] NYC - New York, NY -> CLICK\n[combobox] Date -> SELECT: Friday, April 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.437, 0.875, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3463", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_2860f79e-a8ff-44ed-af2b-0e95f2ac1731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.16, 0.765, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3464", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_1e45626d-e3d4-4d8d-a2d5-e00027b696fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK\n[button] Add to basket -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.677, 0.331, 0.716, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3465", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_78c8ae11-a625-4244-9dc6-d9b5c26f064e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.256, 0.027, 0.631, 0.05] in the image\nAnd my action is Action: TYPE\nValue: gobites uno spork"}]}, {"id": "mind2web_3466", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_11a99bb9-f182-4346-99d5-23975b4994b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.146, 0.375, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3467", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_fa285b88-bf55-4afc-a580-255bd2e0b867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.22, 0.196, 0.237] in the image\nAnd my action is Action: SELECT\nValue: In stock (53476)"}]}, {"id": "mind2web_3468", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_22f36720-f49c-4bc1-a779-3156feea0178.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.06, 0.653, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3469", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_23f03cc3-d1ed-4273-9031-a4516ecac26a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.402, 0.783, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3470", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_1fa9707f-8e33-4b26-924e-5290048b35d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[svg] -> CLICK\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.695, 0.913, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3471", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_3804f977-9b03-466c-a198-f4e922f1bb25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.255, 0.691, 0.301] in the image\nAnd my action is Action: TYPE\nValue: Davis"}]}, {"id": "mind2web_3472", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_2909b809-4844-45d8-b651-c4e61bb750d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.164, 0.094, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3473", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_b815ae06-e12d-4a76-bb21-6ea9e107b158.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK\n[listbox] Direction -> SELECT: Forward facing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.346, 0.444, 0.377] in the image\nAnd my action is Action: SELECT\nValue: Window"}]}, {"id": "mind2web_3474", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_2fb831d2-8c62-425f-befe-ee018631d50b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.066, 0.335, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3475", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_335f924d-95e0-4f41-a4b9-9398ac0c958b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.024, 0.564, 0.038] in the image\nAnd my action is Action: TYPE\nValue: Texas City, Texas"}]}, {"id": "mind2web_3476", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_35161327-e603-416e-871c-83dc1e489fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.131, 0.325, 0.154] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_3477", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_ced15527-c767-4abb-87d0-3add94dbb551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.321, 0.522, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3478", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_46cd12b5-61fb-4d35-9de8-082eeb0a11de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.123, 0.123, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3479", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_9c452aa0-3de5-4570-992e-52374b7e7678.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3480", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_f88b0a6c-386b-4147-ba11-0b26bdf5f77d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.237, 0.329, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3481", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_c6af11c8-db3a-4a7c-8774-ded5eec79969.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK\n[div] Search -> CLICK\n[img] -> CLICK\n[button] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.557, 0.632, 0.632] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3482", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_4b438c6e-a35c-4841-979d-677c72c26074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK\n[link] Fiction -> CLICK\n[link] Romance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.18, 0.834, 0.198] in the image\nAnd my action is Action: SELECT\nValue: Price, low to high"}]}, {"id": "mind2web_3483", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_8b75beda-efc6-4710-a083-8df8a18becd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.379, 0.702, 0.607, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3484", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a2b55fc4-8a30-4dea-a8e1-ef1fe9141036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.198, 0.249, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3485", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_a1272cef-9bbf-4485-bdf7-3c6181cef0f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[span] New -> CLICK\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.491, 0.192, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3486", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_9dd4b28e-4bad-4611-b5e2-93ebd1ed35c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.446, 0.612, 0.554, 0.626] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3487", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_7af99269-815e-4229-bc9f-599cda4974ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.336, 0.389, 0.353] in the image\nAnd my action is Action: SELECT\nValue: 12"}]}, {"id": "mind2web_3488", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_bd5dfac5-d874-4fb1-beba-7ecc203439f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.575, 0.341, 0.586] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3489", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_8897f156-5756-44ea-8035-2d1e1aa1ddd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! -> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.433, 0.71, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3490", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_aeeb7a00-f146-4eef-9051-5cce99f1d2d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.216, 0.446, 0.239] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_3491", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_f42668ce-2ef9-4046-99fc-b0bb221a96de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.02, 0.572, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3492", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0018120a-8da1-4a36-a1c4-b4642c97211b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] From -> TYPE: faro\n[span] Faro -> CLICK\n[textbox] To -> TYPE: bournemouth\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.12, 0.748, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3493", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_1688074f-20d8-4b87-be42-6323f8e78807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.495, 0.241, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3494", "image": {"bytes": "", "path": "./images/b3c7e28e-00a2-4ef7-89c5-f3ad7efadd6a_6a28730e-2571-4d4b-aba1-4e7470873680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show my Library and sort by Albums.\nPrevious actions:\n[img] james9091 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.077, 0.356, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3495", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_fe4765b7-9e8e-44f9-946c-dca01ee3049a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.34, 0.886, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3496", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f0deb072-39be-4a95-ad08-f142fd16bef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.093, 0.367, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3497", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_4b040e38-9b37-4656-a247-f1e793174ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] African -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.327, 0.214, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3498", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_d8ffd9d0-a450-410b-843b-e17ac7c12022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK\n[button] Find Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.127, 0.329, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3499", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_1a2b5134-f49b-44a0-8398-2c8c34b3636f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[span] Attractions -> CLICK\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK\n[checkbox] 5 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.251, 0.825, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3500", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_c32cfad9-2c31-4337-9ac8-0ec37245a3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.09, 0.16, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3501", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_21b9e2ba-8482-4690-94ca-59dbe0423aba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\n[use] -> CLICK\n[link] Super Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.439, 0.527, 0.561, 0.56] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3502", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_edb39148-8f83-4870-b6c3-459e520e1b50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.2, 0.285, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3503", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_629a8d57-269d-4468-b07d-4709c572f645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.71, 0.684, 0.721] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3504", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_e1fe119b-be4b-474b-a766-4b1e38ee29e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK\n[textbox] price to -> TYPE: 1000\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.18, 0.701, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3505", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_a0b7d226-b1ea-47b0-a653-d6eb5ba4ba05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.417, 0.196, 0.444] in the image\nAnd my action is Action: SELECT\nValue: Ages 9-11 (13,217)"}]}, {"id": "mind2web_3506", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_5ee7f323-9126-4fd2-9fe7-42af620acde1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.12, 0.257, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3507", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_145c2b41-3f86-4ce1-af76-9eae6f8845f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... 
-> TYPE: Los Angeles\n[option] Los Angeles, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.174, 0.639, 0.268, 0.653] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3508", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_7751058d-19a1-4973-90c3-187dba735d4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.721, 0.099, 0.731] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3509", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_0cc51631-52d2-485f-a503-60d18725d858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.185, 0.713, 0.211] in the image\nAnd my action is Action: SELECT\nValue: 9 00 am"}]}, {"id": "mind2web_3510", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_84500f23-9176-446d-8886-4f791999ef9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.086, 0.492, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3511", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_e02471ae-a287-4366-858f-e1e9c9166463.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.429, 0.459, 0.463] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_3512", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3a3dcc2b-8ba5-4ecc-9ebe-8935157be036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.179, 0.491, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3513", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f0ca54a4-d720-47a1-9ef7-1250abb05bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK\n[link] Get tickets -> CLICK\n[p] Number of Travellers -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.552, 0.825, 0.605] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3514", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_b46b2518-cf00-4a95-9c7c-8be9d9ea9bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4\n[listitem] 4 -> CLICK\n[button] 04/12/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.857, 0.199, 0.879, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3515", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_8d855d42-03e9-4258-a883-c730da0c9300.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.231, 0.238, 0.265] in the image\nAnd my action is Action: TYPE\nValue: Helena"}]}, {"id": "mind2web_3516", "image": {"bytes": "", "path": "./images/1ed913ba-62a3-4214-a947-217b74c4b8f7_75baa18c-3317-48fd-a276-3fd8f74781c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List upcoming animation and anime movies in theaters\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] COMING SOON -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.39, 0.415, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3517", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_f15096b9-a0de-4645-96d0-19b3ddb924ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.379, 0.844, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3518", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_cd63bae1-61f3-4ddd-b567-98de6b542827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK\n[textbox] City -> TYPE: Grand Junction"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.334, 0.789, 0.354] in the image\nAnd my action is Action: SELECT\nValue: Colorado"}]}, {"id": "mind2web_3519", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_3974d678-91c4-437e-8d34-e418f635f93b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM\n[button] Select My Car -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.372, 0.567, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3520", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_e3aaa938-3cff-4e70-bf97-d5b032254b8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.078, 0.273, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3521", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_9e69c679-e673-4c93-9bfa-0ef90cfd8822.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] The Bahamas -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.28, 0.716, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3522", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_c2b8e0f2-ac12-492f-921d-3e6beba7ee58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.133, 0.705, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3523", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_49d5f5b4-1bec-409c-8e15-abd1df8fd680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: los angeles kings"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.162, 0.396, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3524", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_d34f2314-6468-4913-b6dc-56ae993b7467.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.144, 0.202, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3525", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5ae033d1-dce4-48b6-b901-b87c39aff698.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.332, 0.695, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3526", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_60575d32-63ce-4809-b1e9-936707216285.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Crew\n[input] -> TYPE: James\n[input] -> TYPE: Johnson"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.118, 0.777, 0.137] in the image\nAnd my action is Action: TYPE\nValue: james.john@gmail.com"}]}, {"id": "mind2web_3527", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_e50dbda8-1633-4acf-9084-5102c30e7ae8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.106, 0.81, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3528", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_59f55fde-ad57-424f-a2fb-3045b8b4d5a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.661, 0.166, 0.675] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3529", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31a7e3cb-79ad-49f0-bc24-1e908f91db3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK\n[gridcell] Choose Tuesday, April 25th 2023. It's available. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.216, 0.774, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3530", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4e6f0232-53c5-4c0c-b655-3769916435a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.211, 0.492, 0.241] in the image\nAnd my action is Action: TYPE\nValue: ewn"}]}, {"id": "mind2web_3531", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_ac505227-fbab-4016-b968-22a429f2788c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.124, 0.413, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3532", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_1b698a2f-400b-4069-aa08-252e1b41c7e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.172, 0.777, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3533", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_a4326b33-6f1e-44f7-bea5-3d9949eb8009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.436, 0.265, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3534", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_c4f666ab-2efa-4467-b72e-e21775ff008e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\n[link] STORE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.132, 0.388, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3535", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_61a96c4a-8d14-4bb8-8181-f01bb9e493c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[link] New York, NY -> CLICK\n[button] Find -> CLICK\n[link] Beauty & Youth Village Spa -> CLICK\n[use] -> CLICK\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.239, 0.323, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3536", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_fda0099f-d128-4efa-800d-b5a118ac9d96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: LONDON\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.227, 0.194, 0.247] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_3537", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_b4a1a676-cba7-4e00-9d26-56a1b833680c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[button] Friday, April 14, 2023 -> CLICK\n[button] 1 Traveler -> CLICK\n[textbox] 1 Adults, 18 to 64 years old, 1 of 8 passengers se... -> TYPE: 1\n[combobox] undefined Selected 1 room -> CLICK\n[option] 1 room -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.518, 0.197, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3538", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_6d7e3a4b-2ba2-4c54-9f63-3bd480654856.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.317, 0.495, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3539", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_83a5d3f8-3fbd-407e-a301-d31950d83fbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.043, 0.787, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3540", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_605cda38-8b6c-4335-a9b6-56f97387b951.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.102, 0.234, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3541", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_32c92320-9fb6-4e3e-96d8-a1c24c44c451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... 
-> CLICK\n[button] 8:15 PM Table -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.375, 0.523, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3542", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_dd7d101a-ecbd-49e4-b7ff-ca19a02e1703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.889, 0.41, 0.927, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3543", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_af4734a1-b0a3-4f99-9519-9c0a2ece32b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.8, 0.089, 0.812] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3544", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_90eaa533-7bcb-43a7-9e66-21ceab440567.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\n[link] Model Y -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[combobox] Sort By -> SELECT: Price : low to high"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.172, 0.273, 0.195] in the image\nAnd my action is Action: TYPE\nValue: 94043"}]}, {"id": "mind2web_3545", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4741ca4a-48b1-4a2c-ba06-4b43ec6a2164.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.225, 0.691, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3546", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_8cb52a62-2206-4347-b42b-aa230acd9a96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.338, 0.91, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3547", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_99b80b37-6099-456f-87a9-32b3ec8481d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807\n[combobox] 25 miles -> CLICK\n[generic] 250 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.441, 0.261, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3548", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_c4f097ae-417b-4e54-b706-78282c045acc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.115, 0.287, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3549", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_cf9b71f2-508d-43b2-abac-02d151aef07e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.436, 0.032, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3550", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_d41e77ad-e822-4e59-8fd8-dcc8807c67f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.63, 0.832, 0.642] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3551", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_fb8e030b-a84b-4126-a14f-c1cb8d319e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.133, 0.191, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3552", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_addf3e71-975d-475e-8f5e-5d005886f8ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.285, 0.233, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3553", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_114787f7-a2e7-4e60-876d-faed27ba9a6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.16, 0.293, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3554", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_f3f7201a-72b4-4659-8a82-feec13d3cb17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.148, 0.763, 0.158] in the image\nAnd my action is Action: TYPE\nValue: Phuket"}]}, {"id": "mind2web_3555", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_998f610f-7f61-4784-9f51-d37d0a3d635a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK\n[checkbox] Hotel -> CLICK\n[slider] price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.124, 0.825, 0.151] in the image\nAnd my action is Action: SELECT\nValue: Distance from landmark"}]}, {"id": "mind2web_3556", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_90ccc0fb-f4f3-4a2a-a635-4de2b8634a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.285, 0.43, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3557", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_a622a437-2d91-4253-b902-699ec35998f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.206, 0.148, 0.281, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3558", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_faf7979d-6c0b-4d9c-a40d-02f62a08fbc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK\n[button] Notify for Dinner -> CLICK\n[combobox] Preferred end time -> SELECT: 7:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.388, 0.75, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3559", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_2a2b01e7-6723-4766-8f9c-83c518877422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.272, 0.12, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3560", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_c148dec2-df53-4d6d-8da3-c1277ded7048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Pet -> CLICK\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.612, 0.146, 0.629] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3561", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_f5167534-c977-4b5d-9525-f05085be7f43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[option] Las Vegas Nevada,\u00a0United States -> CLICK\n[checkbox] 10 April 2023 -> CLICK\n[gridcell] 16 April 2023 -> CLICK\n[button] Search -> CLICK\n[button] Sort by:Our Top Picks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.214, 0.448, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3562", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_dc617f22-b94b-42d5-995a-b37fc818ba51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK\n[link] Bundle info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.368, 0.6, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3563", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_be3cf6de-378f-44d3-bcd7-8c7d715c04f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.012, 0.781, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3564", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_6ed06c1b-36f1-4b31-8129-16887e34d948.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.08, 0.3, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3565", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_bb20e275-f386-45b5-a913-79812fd3d5ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Los Angeles\n[span] City -> CLICK\n[div] 21 -> CLICK\n[div] 23 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.166, 0.194, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3566", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_46264135-6884-4764-bf80-6f4645b46d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.156, 0.377, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3567", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_3142cfd0-23ad-43a8-b417-f4d77c8545de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, May 15, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.098, 0.957, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3568", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_889e9377-6802-48fc-b4ac-abacdb2d89b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... -> CLICK\n[span] 31 -> CLICK\n[span] Search flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.17, 0.591, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3569", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_c08cdc90-9282-4aa0-83c6-93436b95f425.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.291, 0.196, 0.305] in the image\nAnd my action is Action: SELECT\nValue: In Stock (41,088)"}]}, {"id": "mind2web_3570", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_13a5e3e7-94b4-40ec-9580-9b81fe415d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.292, 0.234, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3571", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_7c557819-bb01-4638-ab93-94a47f72ad22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com\n[textbox] Phone number -> TYPE: 4533234565\n[textbox] Social Security Number -> TYPE: 234567895"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.373, 0.698, 0.403] in the image\nAnd my action is Action: TYPE\nValue: 06/23/1992"}]}, {"id": "mind2web_3572", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_16980e46-484c-4532-873c-aa941e926a51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.352, 0.454, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3573", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_4c5c24aa-b9e2-4824-a82e-3e44302e8707.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.861, 0.465, 0.886] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3574", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_4601bf91-26ec-4072-bef5-7a24ec700def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.42, 0.432, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3575", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_1ef62685-5086-432a-af32-b3cf57bab812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.163, 0.161, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3576", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_261b1738-122a-44ea-bd95-54f319709a86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\n[link] CareersExternal Link should open in a new window o... -> CLICK\n[textbox] Search by Location -> CLICK\n[textbox] Search by Location -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.455, 0.814, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3577", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2a757624-40fe-4ce8-ae18-82fe7d2a1c97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[polyline] -> CLICK\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.318, 0.194, 0.346] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_3578", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_101b5602-d0c4-43e7-8d2e-97a5ce286aac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.227, 0.777, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3579", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_eac642da-a78b-49e2-a39a-8d9b8f0c1baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.156, 0.739, 0.165] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_3580", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_179c061d-401e-4352-a450-913609704574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.152, 0.186, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3581", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_fd0b1974-3c9c-4824-a547-7f4d6e47199d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.17, 0.612, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3582", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_bd391f4c-1ced-4137-99bc-1d337bbf2639.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK\n[link] $50 to $100 (146) -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.515, 0.069, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3583", "image": {"bytes": "", "path": "./images/63d1f820-37bf-4adb-aabb-65eb7925790c_99c79398-3d2e-47f4-9ba3-1df19f5f70cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current roster of the Miami Heat.\nPrevious actions:\n[link] NBA . -> HOVER\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.184, 0.197, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3584", "image": {"bytes": "", "path": "./images/2f660153-d470-4744-8b75-f8dc64c7f94e_47c0cd12-231a-4660-82ca-493fc19a1456.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Games & Puzzles in the Toys category that are suitable for ages 14+\nPrevious actions:\n[link] Categories -> CLICK\n[span] Toys -> CLICK\n[span] Games & Puzzles -> CLICK\n[button] Explore All -> CLICK\n[button] Age -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.421, 0.8, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3585", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_19094dcc-b685-4d6a-bf2b-d3844ad0662c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.795, 0.822, 0.825] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3586", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_d85c223a-4914-4d3b-b8fc-be2bf865f05d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.247, 0.529, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3587", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_709e43ed-4bca-483c-8cf6-20e17da426c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.057, 0.153, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3588", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_31e673c0-8f73-4188-8f0b-6d94548c7ff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.359, 0.984, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3589", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_9c4fbe9e-68fb-4c08-965d-82474dfa64c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK\n[link] Edit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.646, 0.427, 0.651, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3590", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_4b20a92b-ba63-4d8b-818f-3b4f5f62d65e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK\n[button] Go! 
-> CLICK\n[link] Events \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.622, 0.165, 0.735, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3591", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_f202d61a-2054-4123-96e8-3ef0008ddc27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK\n[link] Indian/Pakistani -> CLICK\n[link] East Village (9) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.534, 0.207, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3592", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_153ea3be-8400-41e8-b0d1-339375d3b742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.047, 0.646, 0.063] in the image\nAnd my action is Action: TYPE\nValue: BATMAN"}]}, {"id": "mind2web_3593", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_a1f5c62e-b068-422d-8b69-407eb0f05496.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] California -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.786, 0.672, 0.801] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3594", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_75de3fc4-e791-4ae1-a8fd-765b72f24302.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.068, 0.129, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3595", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_ce7369ff-594f-473b-8e09-4a88c6876c80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[div] Update -> CLICK\n[div] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.068, 0.765, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3596", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_8c977ab8-7653-4549-a276-20b0a42543e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.548, 0.01, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3597", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_48e54885-a445-42a6-9b49-b473a0468246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.014, 0.804, 0.035] in the image\nAnd my action is Action: TYPE\nValue: rogaine"}]}, {"id": "mind2web_3598", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_1f18cb52-46db-4409-ad2e-47505cdbbcf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[span] AWD/4WD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.21, 0.284, 0.277, 0.3] in the image\nAnd my action is Action: TYPE\nValue: 50000"}]}, {"id": "mind2web_3599", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_bc39ea75-d2da-4418-abda-7bda18e15c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.168, 0.939, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3600", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_08c1d250-f956-4e8d-90dc-d9ae433f1a12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. 
Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street\n[span] 2983 Marietta Street -> CLICK\n[textbox] Apt, floor, suite, etc (optional) -> TYPE: 2\n[textbox] Business name (optional) -> TYPE: Buck"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.65, 0.702, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3601", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_0696d1e4-9bf2-40ab-a5c2-fa44e42a4e1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.058, 0.291, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3602", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_589c149d-3ebb-478a-851e-ee098f3a2f14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.031, 0.905, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3603", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_0a81b2d9-3825-43c7-8e12-c6658b73422e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07055\n[button] Set My Store -> CLICK\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.234, 0.249, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3604", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_b2d669d1-77b6-48b8-8769-60bf2b316324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.191, 0.194, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3605", "image": {"bytes": "", "path": "./images/edb8e817-e9d7-45eb-9706-01967e3a0fe8_6068b180-5a93-4752-8e12-0faafbbdd5c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get offer to sell car with AZ XA1234 number plate from Arizona state.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: XA1234"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.3, 0.583, 0.353] in the image\nAnd my action is Action: SELECT\nValue: AZ"}]}, {"id": "mind2web_3606", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_0c26d744-d579-4c4b-a235-1e6127cc77a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers\n[link] car accident lawyers -> CLICK\n[div] Sort: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.203, 0.683, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3607", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_3fbd0200-f2c8-4e2f-8708-d66f70d3a194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK\n[span] Type your medication name -> TYPE: Metformin 1000mg"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 0.55, 0.822, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3608", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_226219ef-0a76-4285-b2d1-b601d1086627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.064, 0.753, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3609", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_cdd03303-a8f9-4c95-9c04-fbe006ec7497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.413, 0.43, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3610", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_a107d49f-937e-412b-9e1c-4497d02bef15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.026, 0.309, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3611", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_ef81e5e1-5428-498a-98ec-6410b14f4ad8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.157, 0.414, 0.175] in the image\nAnd my action is Action: TYPE\nValue: 37863"}]}, {"id": "mind2web_3612", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_88036838-a91e-4722-97c0-8136bdd850b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.494, 0.245, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3613", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_9cad183e-9be2-406a-9b05-f4373367e1a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.19, 0.466, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3614", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_d8be4795-fcaa-4230-b948-82c117e3d0d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.35, 0.024, 0.391, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3615", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_3b286426-04f0-433c-b13e-98ff7eb4ca88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.116, 0.209, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3616", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_5f7d8dc9-cccc-4362-a752-71bf2543a680.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.255, 0.285, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3617", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_e39e31de-cae6-4cd0-9393-50238c808560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.155, 0.481, 0.179] in the image\nAnd my action is Action: TYPE\nValue: SHANGHAI"}]}, {"id": "mind2web_3618", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_014f12cf-0228-4d0a-95f9-2acff952c74b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.273, 0.986, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3619", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_470c7cf0-6d9e-4a43-913b-fb309a888da5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.082, 0.041, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3620", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_229f869d-ed7f-453b-8924-56d1568435d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.34, 0.427, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3621", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_57d7eb23-80b4-4046-9c8b-114b199b3b6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.216, 0.394, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3622", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_a294c5a9-20c0-46b5-b25d-a4153b76d065.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.228, 0.469, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3623", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_9a83aab6-af9c-4062-afbe-ca6eaa4e3249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.209, 0.664, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3624", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_05f51ed3-a3e2-4ef0-909a-353e91edf249.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.175, 0.432, 0.217] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_3625", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_5dfefa56-2c3f-4f70-b6f5-da3a613a883d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.173, 0.192, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3626", "image": {"bytes": "", "path": "./images/af6655c8-0047-4c93-8da2-aeb04ff06b17_3498a297-8121-418f-a8a4-ce50490e51ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse climbing courses within 50 miles of 90028.\nPrevious actions:\n[link] Classes & Events -> CLICK\n[combobox] Activity -> SELECT: Climbing\n[textbox] Location -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.218, 0.717, 0.241] in the image\nAnd my action is Action: SELECT\nValue: Within 50 miles"}]}, {"id": "mind2web_3627", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_1d57e568-92c5-431a-acb6-65d74fe5e11c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sorty by highest audience score.\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.802, 0.72, 0.84] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3628", "image": {"bytes": "", "path": "./images/9a462751-758e-42bd-967d-373c13b90382_4ddaf55b-33d3-4784-8370-56d90014d635.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current injuries of Phoenix Suns players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.105, 0.335, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3629", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_593599e6-9edd-4f0d-bc13-d6d92f8ce00f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.183, 0.104, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3630", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_c39edac7-e345-4b0a-85b0-aeb1ffc251eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.767, 0.326, 0.797] in the image\nAnd my action is Action: TYPE\nValue: las vegas"}]}, {"id": "mind2web_3631", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_3281daa6-4a6a-4cdc-b3d5-b28e0f977a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Done -> CLICK\n[path] -> CLICK\n[link] View More -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.522, 0.309, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3632", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_c629a825-fdc9-4dde-adca-9b8920a2ba7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.272, 0.393, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3633", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_74a2475d-3369-49de-8be5-e1aeaaa0f1e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.189, 0.096, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3634", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_a9810361-2044-4872-9fea-484bc49072e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.197, 0.931, 0.224] in the image\nAnd my action is Action: SELECT\nValue: 6 00 pm"}]}, {"id": "mind2web_3635", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_9d038530-f4c7-4d78-a473-7ab36b4280d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.476, 0.074, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3636", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_7f718732-3846-4fe5-9b78-053b204a1731.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.089, 0.199, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3637", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_17af7d32-c2a2-4881-bd39-083b934f6dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.054, 0.454, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3638", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_218ae251-373b-48e8-8cf3-1af7deee8ebc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\n[searchbox] Where are you going? -> TYPE: Aquarium of Paris\n[div] Aquarium of Paris -> CLICK\n[div] Select your dates -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.137, 0.92, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3639", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_a3dabbcc-97e0-43ed-b5eb-1c323c70302e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.06, 0.518, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3640", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_91ccb5c2-07c7-4ad6-afd3-8371104390d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.174, 0.107, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3641", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_cbb3b97e-254d-43a0-90e1-31250caeca05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.048, 0.931, 0.077] in the image\nAnd my action is Action: TYPE\nValue: Golden State Warriors"}]}, {"id": "mind2web_3642", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_1bcc0fa7-4d6f-4144-90af-4829befb88b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.323, 0.647, 0.359] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_3643", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_c4e0a089-60dd-4bb7-8945-bbe783357494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.308, 0.552, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3644", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_63561ca3-8abb-4027-a3cb-4bbae12a9f7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK\n[span] Dates -> CLICK\n[button] October 2024 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.394, 0.871, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3645", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_1a8a24ff-8bbd-4682-a611-5cdfe6fe4811.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[textbox] From: -> CLICK\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK\n[div] Anywhere -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.287, 0.824, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3646", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_765af537-4144-47d1-8e0c-838a365b423d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.069, 0.246, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3647", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_23d0755e-281c-4f18-a0be-b4c4da2fa859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.197, 0.312, 0.226] in the image\nAnd my action is Action: TYPE\nValue: Albany, NY"}]}, {"id": "mind2web_3648", "image": {"bytes": "", "path": "./images/642ac4f9-dea1-4c10-89a5-208238a2c6e9_ff5d4950-a9ff-4c8c-b17e-036c857de1e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a red used tesla Model S.\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.208, 0.491, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3649", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_5a0d43c1-dfc2-438a-b164-948f1f510d2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.892, 0.034, 0.963, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3650", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_90f4e811-90b3-4d4c-9a0f-ae51d58d2121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\n[link] Company -> CLICK\n[link] Career Opportunities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.46, 0.758, 0.488] in the image\nAnd my action is Action: SELECT\nValue: Product Management"}]}, {"id": "mind2web_3651", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_0b62a0d8-1317-4f8f-b64c-d11d7f14b218.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[button] Load more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.253, 0.715, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3652", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_4859f571-24a4-48b6-aed1-e9267843e8e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.399, 0.34, 0.43] in the image\nAnd my action is Action: TYPE\nValue: 10003"}]}, {"id": "mind2web_3653", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_3ea5e794-345f-46e8-bbc8-b7b4484a3de2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.064, 0.697, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3654", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_5f6c4a74-bc21-4db6-a489-df0c42e7fc32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.096, 0.896, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3655", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_d3549f30-b63f-480c-afbe-9574b4078c82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] From -> TYPE: Queensboro plaza\n[listitem] Queensboro Plaza Station, Long Island City, NY, US... -> CLICK\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.475, 0.359, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3656", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0dbd5cb-4ecf-4e6b-8612-ad6b7974e5aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.605, 0.959, 0.635] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3657", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_1f80f5d5-546c-4959-bdc5-865a0879eadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.361, 0.727, 0.389] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_3658", "image": {"bytes": "", "path": "./images/d4298ee7-ac1d-4cac-bfa6-62bf7cd67004_7fe8d3a7-525e-468f-9651-e4b48a64e849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show camping hammocks with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.048, 0.128, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3659", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_af60907a-d2d1-4c07-9b1c-fbc64cd98f23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.924, 0.214, 0.936, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3660", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_cc1da7b0-3376-49e3-8030-9d2dc5302d38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.179, 0.277, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3661", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_ffaef589-2d9e-4621-9fd9-ac90bf31af16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.05, 0.36, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3662", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_33e83e8c-d733-4027-8a09-202f4e49feb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[button] Condition -> CLICK\n[checkbox] Pre-Owned Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.046, 0.378, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3663", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_1e410a9f-d065-48b2-ab36-3ff5f7565e28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.579, 0.835, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3664", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_d1b389f4-383f-4b10-8e74-cf4f8ad0dfe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.643, 0.263, 0.671] in the image\nAnd my action is Action: TYPE\nValue: Phoenix"}]}, {"id": "mind2web_3665", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_dcdbcae5-1f3a-4f5a-8794-b75c41184eae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.187, 0.361, 0.208] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_3666", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_04d56d8e-6a56-43e5-b0cd-91f655b199c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.062, 0.459, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3667", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_b8e73317-d7a9-4a9a-8eda-7d78fd298cd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK\n[button] View more availability for BayLeaf Modern Indian C... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.638, 0.354, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3668", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_a20f898d-4a59-40a7-8710-5fbb4914d1fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.194, 0.991, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3669", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_09906659-140d-4f28-bfc2-14222fa6aa19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[button] Selected Return Date 03/22/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.263, 0.837, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3670", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_e78c04e6-b25d-428f-8632-af2289e2059d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.216, 0.777, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3671", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_04bf0b33-af4b-4c0e-ac4e-cc990e747c79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.564, 0.101, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3672", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_d5098452-d25b-474d-8bf7-267ce1c1b48a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.212, 0.757, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3673", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_99a9f5ae-ce6c-4ca8-aad5-2a7374738144.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.218, 0.339, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3674", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_148f65ff-4194-4d67-a558-70f7122f3ca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.695, 0.156, 0.719] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3675", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_389ba481-1a87-4e37-bd09-b2c6934e5bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3676", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_580a3f91-4259-4821-8318-d3a02646e2ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. 
Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.793, 0.963, 0.83] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3677", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_a921fb0a-6baa-41d4-9927-a3400e96af32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.028, 0.598, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3678", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_e28396bc-f80b-4f22-adc6-9462051a4b4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK\n[label] 0 - 5 Miles (2) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.467, 0.944, 0.503] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3679", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_5354557d-77af-42ce-9b8d-f4948fc805b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK\n[button] 19 -> CLICK\n[button] 23 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.437, 0.393, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3680", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_79c4b306-cc3f-48cf-afe4-16fc9eaaf580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.196, 0.748, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3681", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_012c30d7-d7bb-42ba-9e01-2f8f6b5d986a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.129, 0.047, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3682", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_beae922c-80e6-4d7a-940c-8e6b259f2e64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.448, 0.285, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3683", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_35261f26-98d1-44e4-80c9-9aead528ab00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.404, 0.414, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3684", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_be5b4fb3-a2e4-4dff-a6a7-c3050aea75b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.116, 0.196, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3685", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_2724b6e4-0312-4e04-8e67-424ee5c3c16c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\n[link] NASCAR Cup Series NASCAR -> CLICK\n[button] Open More Dropdown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.078, 0.978, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3686", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c34fd044-2f8d-46fe-b315-356e1882f1db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK\n[link] 20 -> CLICK\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.546, 0.352, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3687", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_02254aee-cb52-48db-bd49-b5397932ee15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER\n[link] Gaming Desktops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.627, 0.158, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3688", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_07d2e417-e70c-4681-b7bf-d317df0f8582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.172, 0.858, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3689", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_b1592aec-679c-4045-8cc0-3fa78dddac1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK\n[span] see all stores -> CLICK\n[button] SET AS MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.154, 0.291, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3690", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_71867b4f-43e7-401c-8d43-19485f985139.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.205, 0.235, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3691", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_b87396c2-e500-49d9-b71a-b664bc30e50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.005, 0.2, 0.128, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3692", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_4d685f2b-b04d-4d96-9ed3-8bd8f3208911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.576, 0.139, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3693", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_af4d003a-a706-4015-902e-83ede88b94cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.066, 0.263, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3694", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_72feb769-7538-4166-8839-69d4ab675c3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.177, 0.446, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3695", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_3e4047ec-800b-44dd-bd0d-eb31c5702bbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.121, 0.652, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3696", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_9fb3ceb3-fc32-4e90-a632-a140bbf943f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK\n[checkbox] list-filter-item-label-12 -> CLICK\n[checkbox] list-filter-item-label-3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.729, 0.089, 0.737] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3697", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_ca8ea814-846f-4509-8df6-9bcd231c1753.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: thai\n[heading] Senyai Thai Kitchen -> CLICK\n[button] Notify for Dinner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.372, 0.607, 0.404] in the image\nAnd my action is Action: SELECT\nValue: 7 00 PM"}]}, {"id": "mind2web_3698", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_1e35aad2-d7c4-415f-a641-d0f2f2249eae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.193, 0.567, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3699", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_bf59aa95-f3c4-46cf-a6d8-9dd2cc9b7d93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718\n[textbox] mm/dd/yyyy -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.334, 0.403, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3700", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_bb5bc946-e348-4e88-878a-a8b5ca8d580b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[a] LON - London, United Kingdom -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 20 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.226, 0.875, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3701", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_98a5fa7f-da39-49bb-a5af-0e4fc96dee15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK\n[textbox] Search for parking -> TYPE: Atlanta International Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.29, 0.86, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3702", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_46d72dc5-24a9-488b-9fda-4f168686e6be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.268, 0.35, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3703", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_63b17018-e0a0-4c59-95fe-5f76311a2bf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.482, 0.512, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3704", "image": {"bytes": "", "path": "./images/130b1cd5-3e2f-4676-aea4-24ba9706171b_ef1fc0e7-40f7-4a73-91c6-b2442598e009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the on demand sales section and rent a film in HD format.\nPrevious actions:\n[link] Visit the On Demand page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.127, 0.488, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3705", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_0277e754-72a2-4593-be67-f4a773a1bc74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK\n[button] Sorted by: Last 7 days -> CLICK\n[link] Last 30 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.276, 0.104, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3706", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_7fa88677-82b5-4d90-876b-5f482ce96cf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.204, 0.188, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3707", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_0b5bef49-b4a1-4dfa-958b-2ca8f9b5d0b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK\n[button] FIND TRAINS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.192, 0.93, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3708", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_89a74936-e94d-46b5-acc8-142543492cd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.221, 0.469, 0.243] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_3709", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_89d4ed36-f44d-45fc-b119-11e4213af3f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.102, 0.74, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3710", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_57e8de5c-e063-4c92-a186-10c330179a68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER\n[link] Grocery -> CLICK\n[link] Bread -> CLICK\n[label] In Stock Today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.459, 0.078, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3711", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_ea9ed2c3-e0a1-449f-85b9-3c708ee4cc7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK\n[img] Nintendo Switch with Neon Blue and Neon Red Joy-Co... -> CLICK\n[button] pickup - unselected - 1 of 3 - Ready within 2 hour... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.202, 0.97, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3712", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_7f04e0ee-bde9-4028-ab6d-0f911194d39b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.167, 0.107, 0.278, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3713", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_b072a168-e520-4371-8fd7-bec2c6b65157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[textbox] Select Event Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.278, 0.404, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3714", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_7f55529b-891c-4541-901a-90309af19a6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Attractions -> CLICK\n[span] Asia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.273, 0.28, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3715", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_74f68853-2766-4276-8df3-1703d486591f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.199, 0.471, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3716", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_41b402fe-382e-48b8-802f-92c009ea16d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. 
Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.201, 0.113, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3717", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_0cfb8e52-e1e1-4d68-8dd0-9510f371d4d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.212, 0.185, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3718", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_9b8d9526-5190-4d9e-8ab1-2845681d329c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.227, 0.271, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3719", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_ea7ad801-a927-4346-8491-60ac1394d7fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.052, 0.441, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3720", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_d3712cc7-e7db-450c-98e2-ffedf82420bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK\n[link] AT HOME -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.2, 0.226, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3721", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_ef86012d-abba-4be3-96bc-e4952b0e8c66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK\n[link] 2 -> CLICK\n[textbox] Date use format: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.283, 0.633, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3722", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_aded9f6c-a6d3-4ac8-9090-f2bd5ceeb5fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] Apr -> CLICK\n[generic] 2 -> CLICK\n[div] May 2023 -> CLICK\n[div] Apr -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.173, 0.161, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3723", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_2478e44d-fd60-463e-bf01-3073e5b5b703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.007, 0.561, 0.028] in the image\nAnd my action is Action: TYPE\nValue: Mark Knight"}]}, {"id": "mind2web_3724", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7450671e-a644-40ce-b909-56b5ee226fad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.347, 0.045, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3725", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_a1f30be5-5578-4ce5-bf6d-749fb72afe40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.323, 0.553, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3726", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_aec31435-a64c-4d66-9fc2-9e9600ff35c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. 
-> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.267, 0.693, 0.286] in the image\nAnd my action is Action: TYPE\nValue: 04/21/2023"}]}, {"id": "mind2web_3727", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_6c0e6305-bd44-4bc3-a988-f15e494a4983.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.376, 0.368, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3728", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_d1b22a5f-79ea-4ec1-adf8-4b3f11890d9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.147, 0.453, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3729", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_ee2d1aef-5a2a-4702-bd44-e5d6536ca7d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.221, 0.71, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3730", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_12955726-9213-4e4c-bf79-a6773d5f74f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.617, 0.291, 0.646] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3731", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_76413976-1d41-430a-ba07-e0862e40f90d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Pickup -> CLICK\n[button] Pick up Plastic Eggs in Egg Nesting Easter Eggs Mi... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.442, 0.988, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3732", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_b355331e-4d3d-4f44-855e-35803f0c361d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[tab] Review score -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.217, 0.081, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3733", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4c7fd1d2-f2a3-4fb3-b095-0b4009b4d455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.051, 0.294, 0.1] in the image\nAnd my action is Action: SELECT\nValue: AMC Grove City 14"}]}, {"id": "mind2web_3734", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_f3ce94f2-22f0-4a8c-b1a8-9bd4a4b30725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.583, 0.022, 0.644, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3735", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_57b08e09-a0e6-42ad-a73b-bec681a0fe05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.05, 0.777, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3736", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_851b2cbd-f474-4372-976c-f0b18b7fdbf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 1234567890123 and last name Davis\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.817, 0.194, 0.831] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3737", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_924bce62-03db-4c07-8747-b201b4878623.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Oakland, CA\n[span] Oakland, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.022, 0.268, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3738", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_de659bc7-ef2a-4d54-82e1-0f451dcb0ad2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.348, 0.382, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3739", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_1ae9ef10-208d-4de5-8039-ca99154d2c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.398, 0.173, 0.416] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3740", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f2ea8d95-fcd1-4372-b4da-1cc5f1afbbc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.786, 0.032, 0.917, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3741", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_3c4d601f-4977-410f-85d7-145e7bcfeedd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.159, 0.206, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3742", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_48083d7b-9980-4fdf-a149-9e9b59d87979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.478, 0.202, 0.586, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3743", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_2b0e0d18-c0cf-4ae9-a1da-3a815944a4b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.274, 0.221, 0.294, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3744", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65e952d7-d927-49cd-921c-69fdba1e5f71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. 
The emial address is abc@abc.com\nPrevious actions:\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James\n[div] -> TYPE: Smith\n[input] -> TYPE: 123st rd\n[textbox] Email Address -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.725, 0.135, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3745", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_a61a15a9-e82e-4e0b-a708-692906fc75a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.465, 0.634, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3746", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_1b89abdb-f73a-43c1-ac9e-1c0f9c6469f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.123, 0.914, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3747", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_3118d8f6-34c8-4f6f-80f7-f5d6a50d8d16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK\n[span] Type your medication name -> TYPE: Metformin 1000mg\n[li] metformin 1000mg tablet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.083, 0.378, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3748", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_69d65ccf-9e5a-4fe1-a6f9-cd639fb37903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK\n[span] Shop all at-home COVID-19 tests -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.564, 0.377, 0.601] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3749", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_f9d90439-ae28-4177-8b1a-a13aba005f33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.287, 0.389, 0.319] in the image\nAnd my action is Action: TYPE\nValue: Sheboygan"}]}, {"id": "mind2web_3750", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_c359dc6b-ba98-46b8-873c-82c429ee952c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.345, 0.384, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3751", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_74e387ce-066e-4f96-8387-702c1b05bf50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.338, 0.389, 0.35] in the image\nAnd my action is Action: SELECT\nValue: 5"}]}, {"id": "mind2web_3752", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_0295c274-6cda-45f7-98c7-7166ccc9b078.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.28, 0.328, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3753", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_987ecaf2-68de-479d-ac77-4f3e3210dc44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: sheffield\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.291, 0.133, 0.316] in the image\nAnd my action is Action: SELECT\nValue: 10"}]}, {"id": "mind2web_3754", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_9b46df9e-8342-4070-a385-365f7f893f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[button] Sat., Apr. 22 -> CLICK\n[button] April 21, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 2 Guests\n[link] Este -> CLICK\n[button] 12:00 PM Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.294, 0.523, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3755", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_01ebc433-929a-451d-88ea-5e8a625df494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[link] Products -> CLICK\n[button] Bathroom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.131, 0.605, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3756", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_01228a6c-8f69-4071-b709-39c2001dbcdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK\n[textbox] Search Amazon -> TYPE: laundry detergent\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.047, 0.866, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3757", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_85be9dc1-3b83-4792-8d50-19e0cb3540ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK\n[textbox] Your Name -> TYPE: Michael Cahill"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.336, 0.671, 0.37] in the image\nAnd my action is Action: TYPE\nValue: cahillm@gmail.com"}]}, {"id": "mind2web_3758", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_a73ad60f-4274-4365-bfbc-944f9bab2057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.536, 0.217, 0.812, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3759", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_badb1150-e62e-4e2b-aec2-bc2053436366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[gridcell] Sat Apr 01 2023 -> CLICK\n[button] 4 -> CLICK\n[img] Add -> CLICK\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.273, 0.393, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3760", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_59519c11-891c-4029-9ef0-3ba24de3ac95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... 
-> CLICK\n[link] 21 April 2023, Friday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.196, 0.858, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3761", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_e160c509-1fef-4f7f-b0bd-43295ecb6d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.488, 0.858, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3762", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_fc37ffb2-77a7-460f-a6bb-b4b3437bd545.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 0.048, 0.9, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3763", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_fe8b8ffe-c907-403c-a03d-6850c3d9f96b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK\n[gridcell] Size -> CLICK\n[label] 11-12Y(150) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.316, 0.473, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3764", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0233f7f3-ea8f-4b99-a6ff-89ebc2d48120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.253, 0.613, 0.3] in the image\nAnd my action is Action: TYPE\nValue: Dirty"}]}, {"id": "mind2web_3765", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_7b711ada-4f5d-41ae-b080-8f23e4d171fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.234, 0.709, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3766", "image": {"bytes": "", "path": "./images/14be9a2b-5559-423b-8362-4dccf306992e_4f276e90-fedf-456b-846d-97813a812772.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rail for Oyster Bay, Provincetown.\nPrevious actions:\n[link] commuter rail Commuter Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.189, 0.395, 0.213] in the image\nAnd my action is Action: TYPE\nValue: Oyster Bay"}]}, {"id": "mind2web_3767", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a6f3a180-c5b5-4939-9b17-8493fda922f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK\n[combobox] How many guests? 
-> SELECT: 4 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.057, 0.964, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3768", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_10273ad8-391b-434a-be8d-9bc3df13ce88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.27, 0.045, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3769", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d23e8ae2-e172-437f-93f0-db24ea60fe87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK\n[button] State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.282, 0.123, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3770", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_1ef02895-60ba-4c09-b182-0296afae7c18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.072, 0.525, 0.091] in the image\nAnd my action is Action: TYPE\nValue: scream"}]}, {"id": "mind2web_3771", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_17a4c8ec-34de-455d-b607-6752a9cfdd37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.22, 0.156, 0.236] in the image\nAnd my action is Action: TYPE\nValue: The Elephant Whisperers"}]}, {"id": "mind2web_3772", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_76103e97-41d0-43d7-9d47-732f0067c485.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang\n[div] Ford -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.124, 0.888, 0.141] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_3773", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_9ef05896-4029-4ebf-a6ba-c5fee0ad34ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK\n[button] Release Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.131, 0.638, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3774", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_5d5f28c5-fb7c-4135-823c-8ff20b3c324d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.145, 0.238, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3775", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_4703ca2c-dcb8-47c4-b517-1e71d5bdca63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.216, 0.424, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3776", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_2616aadf-415f-4074-990c-4b08b8a936f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.106, 0.257, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3777", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_13946ef7-7b91-478e-adc6-d3ef4f6a8270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK\n[link] Absolute Beginner 91,138 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.237, 0.523, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3778", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_67839d50-ac48-453d-a9a0-acb0cf67f1de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.308, 0.462, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3779", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_b1b264ad-e45c-499e-b244-7057ae2b6a20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... 
-> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.174, 0.196, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3780", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_1aca95b5-b812-4f65-8921-f9a045ac4c34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: MUMBAI\n[button] Mumbai, IN (BOM) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.187, 0.481, 0.214] in the image\nAnd my action is Action: TYPE\nValue: LONDON"}]}, {"id": "mind2web_3781", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_9044b1b5-0c59-4b6b-beda-cd9346f97119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Guides -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.373, 0.469, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3782", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_89271800-5603-4e29-92cc-dd53f66ecbcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Purple -> CLICK\n[div] Support Level -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.351, 0.194, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3783", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_5971c14a-a5d3-406e-b9e1-73501ebafdc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.343, 0.34, 0.371] in the image\nAnd my action is Action: TYPE\nValue: 10018"}]}, {"id": "mind2web_3784", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_5c6b6d07-967f-43cf-9ddb-b8dc31465744.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\n[link] Categories -> CLICK\n[link] JRPG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.462, 0.432, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3785", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_269409d6-ecee-4f2b-99b4-ba29d6714c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.433, 0.777, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3786", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_1668cab6-5869-42d8-8680-ba5dcec3b260.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK\n[textbox] Reservation/Ticket Number -> TYPE: 123456\n[div] -> CLICK\n[option] Last Name -> CLICK\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.333, 0.623, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3787", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_3a8538ef-d7f5-4aa9-bb0c-3397534a6f13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle\n[span] Seattle, Washington, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.085, 0.964, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3788", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9301c32c-6e2d-47b7-b52e-d71621d2e4e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.282, 0.329, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3789", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_de6219f6-89fb-4574-9f9d-a5f9841ac5f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.153, 0.915, 0.168] in the image\nAnd my action is Action: SELECT\nValue: 04 00 PM"}]}, {"id": "mind2web_3790", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_bb572b4c-5926-4951-8d9d-69a1dea4bbde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.562, 0.509, 0.886, 0.583] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3791", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_f7f3bec6-4a04-4892-9ec9-705082705c0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK\n[span] Tomatometer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.318, 0.802, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3792", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_c91647f3-2ca4-43e8-b3a6-0868ad5cbb47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.259, 0.764, 0.273] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_3793", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_f6aefd41-aa8a-43b1-8161-19406378a4db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.236, 0.244, 0.252] in the image\nAnd my action is Action: TYPE\nValue: 40"}]}, {"id": "mind2web_3794", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_fb4ec3f7-9d27-400e-bfdb-d206cf95919c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.487, 0.165, 0.493] in the image\nAnd my action is Action: TYPE\nValue: caldwell"}]}, {"id": "mind2web_3795", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a2dc6c3f-b0d4-432b-b984-65d526e49e90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] 2+ -> CLICK\n[radio] Owned -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.465, 0.111, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3796", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_22ad3562-e0f4-42c3-b096-8c173a47673c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.371, 0.788, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3797", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_766ece84-ce32-403a-a5b1-b4d395c07763.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.61, 0.846, 0.639] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3798", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_75b1e3b3-0f65-46e3-9bdd-183e1fa89c13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\n[link] The Hit List -> CLICK\n[a] -> CLICK\n[link] Book Now\ue90b -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.335, 0.354, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3799", "image": {"bytes": "", "path": "./images/0692908b-4bf4-48ac-b41a-37c59ea5ba1d_94548ec8-59f2-4d02-9f36-8261307d4a80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nfl game played by the las vegas raiders.\nPrevious actions:\n[combobox] Search query -> TYPE: las vegas raiders"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.079, 0.259, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3800", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_a9e8fc20-2ea7-4e96-a9b0-57178326a414.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK\n[link] Atlantis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.237, 0.509, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3801", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_839a0b11-1b05-4278-a88b-7643ec8d49e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.653, 0.547, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3802", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_56bb169a-e765-48b8-a83e-afbef30548bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[path] -> CLICK\n[switch] COMPARE -> CLICK\n[button] Add to Compare -> CLICK\n[button] Add to Compare -> CLICK\n[button] Go button to Compare the Selected Car's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.323, 0.17, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3803", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_e0553eae-5195-46a6-a861-4a5d92255ee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.254, 0.919, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3804", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_45b39ab8-87b0-414a-9d33-24d95074b735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.202, 0.252, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3805", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_ad9349f2-8c75-4639-902b-53ab55d5777d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.325, 0.236, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3806", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_753c3a25-32d1-4440-bc15-21fe074f1507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[span] -> CLICK\n[span] Search flights -> CLICK\n[link] Sort & Filter -> CLICK\n[button] Default \u00a0 -> CLICK\n[span] Price (low to high) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.452, 0.817, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3807", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_f9bef785-c84b-4232-9461-02f5773cd5b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK\n[link] \ue902 CPU -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.606, 0.578, 0.727, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3808", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_47f27a16-dea7-46da-b800-33f2c3f70383.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... 
-> TYPE: 10003\n[button] Find care -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.571, 0.448, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3809", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_fe4efd1c-410a-48b7-b828-680d8788f260.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Reserve Now -> CLICK\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.449, 0.514, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3810", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_65fab831-efd4-477b-9da8-0faaaef8bb8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250\n[textbox] Down Payment -> TYPE: 3000\n[combobox] State -> SELECT: Tennessee"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.266, 0.459, 0.288] in the image\nAnd my action is Action: SELECT\nValue: Challenged (< 580 FICO\u00ae Score)"}]}, {"id": "mind2web_3811", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_6afbd6e5-eb0c-41c2-a3b1-6befe4805e1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK\n[span] Buy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.398, 0.938, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3812", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9eb1b18e-f4c5-49f7-b46d-5a8ba355de59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.087, 0.292, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3813", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_0f4fe5e2-b0da-40f2-855c-b90d7b3b2911.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.447, 0.633, 0.527] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3814", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_89f727a9-5994-4db2-bb45-0252e4288321.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[checkbox] Vehicle -> CLICK\n[checkbox] Large appliances -> CLICK\n[radio] Out-of-state -> CLICK\n[button] Virtual Consultations -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.166, 0.612, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3815", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_75e8514e-c3b1-4654-9bca-cd6f81f056d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK\n[select] All -> SELECT: In Stock (7,640)\n[select] All -> SELECT: Spanish (42)\n[select] All -> SELECT: Paperback (39,356)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.313, 0.196, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3816", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_f8078f35-e8f2-4eb4-be8b-f3a68ee359fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[link] Restaurants -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.083, 0.329, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3817", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8ad738de-57cb-45e4-8224-58518d4392df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.106, 0.424, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3818", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_c595cdaf-154a-4496-81bf-4db06cba5982.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.105, 0.316, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3819", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_15a08e40-68ed-4f4d-a357-4fd4c651041b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.174, 0.506, 0.204] in the image\nAnd my action is Action: TYPE\nValue: miami"}]}, {"id": "mind2web_3820", "image": {"bytes": "", "path": "./images/63388e25-a4a3-416d-bc9b-bb915fbca977_acdc38cd-1d94-4d85-bbd1-5179e1ce1ae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular free online fashion class events occurring this weekend in English, find details of the top result event, save the event and follow the organizer of this event.\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[svg] -> CLICK\n[div] RawElegant.Life -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.152, 0.556, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3821", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_a7515530-9517-4dbe-acf5-de91208b0e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.664, 0.067, 0.961, 0.09] in the image\nAnd my action is Action: TYPE\nValue: san diego"}]}, {"id": "mind2web_3822", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_44d13dde-5192-4e92-9fe1-1246632f3e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.322, 0.233, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3823", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_5400dcf0-be71-4d8e-9a26-ad6290b3814d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[searchbox] Search -> TYPE: Selena Gomez\n[button] Search -> CLICK\n[link] Selena Gomez -> CLICK\n[link] Popular tracks -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.171, 0.518, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3824", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_451ce382-7c2f-460e-9e44-773995a6b6ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.219, 0.739, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3825", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_e35d2867-2a3f-478a-a454-b0ba703b2765.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.249, 0.571, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3826", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_65379b10-2f40-4af4-a21a-685cdc35a990.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.281, 0.89, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3827", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_5dbc5df6-49dc-425b-a6b4-27142ff6f88f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.335, 0.193, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3828", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_d661979e-d1ee-4a39-8bf0-6e7167d44cc9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.302, 0.284, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3829", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_fdf4113a-2a65-4a27-8cb5-594795802f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.237, 0.397, 0.27] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_3830", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_6dc7cd7d-428f-4f78-971b-fa96dc6a2afc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.241, 0.795, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3831", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_9c594686-36b7-46ca-840f-21c065100725.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.142, 0.925, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3832", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_8613dbd6-8b64-4a19-9b1c-fe4c2190d93f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: smith\n[button] SEARCH -> CLICK\n[button] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.107, 0.897, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3833", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_34355832-70f9-4329-a27d-567ce662a636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK\n[link] The Smiths -> CLICK\n[button] Sorted by: Last 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.713, 0.656, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3834", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_9681f5d0-e126-4c7b-91ca-97c50520ae5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[textbox] Zip -> TYPE: 90012\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2017\n[combobox] Select Maximum Year -> SELECT: 2017\n[div] White -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.226, 0.429, 0.247] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_3835", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_0fd72db4-9850-4454-9f19-30b877e934a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.43, 0.24, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3836", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_88026934-0d2a-4303-86b2-0cbebe66da86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. 
Select a station. -> TYPE: MILAN\n[span] Milano (Milan) -> CLICK\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.226, 0.282, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3837", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_9fa4d940-d07f-412d-8bff-7f66d56fc5e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK\n[link] Sort & Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.111, 0.439, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3838", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_7ff4adcf-0ad0-4b73-bae6-6d5f955da03e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.258, 0.396, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3839", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_571fca71-af69-4723-b8b5-c0ca0f59f498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.281, 0.434, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3840", "image": {"bytes": "", "path": "./images/c51cc2c0-5750-4bae-b665-ad1b7fafa63e_f6b71d34-d022-4c76-867a-5aac1e9c41c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show ticket price for Aquarium of Paris for 2 adults on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.044, 0.551, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3841", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_a451a11c-b4e5-4d40-a845-71a26097a776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.151, 0.341, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3842", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_941508d6-247e-481b-9735-b0798b4133a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox Series X|S -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.288, 0.101, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3843", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_45b58892-de1f-4e13-b47a-bb947376442d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.269, 0.164, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3844", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_d616b2c2-43e5-43e0-89db-5a8daf4728de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.776, 0.244, 0.885, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3845", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_cf2ada0b-3120-4416-b301-08bf8df0fa65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.117, 0.713, 0.129] in the image\nAnd my action is Action: TYPE\nValue: VENICE"}]}, {"id": "mind2web_3846", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_2fa2e6c0-de6b-4376-bed6-9acdf1eb3c84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK\n[input] -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.066, 0.788, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3847", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_3f38216e-16cf-4a62-82d5-00bd6a493e5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.103, 0.735, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3848", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_2e6609c2-84a3-4a3a-bc5d-f29bee7e86f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.344, 0.192, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3849", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_23526806-c2f8-4218-b6a7-e66c61eb3c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.027, 0.524, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3850", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_ff05c512-c5cf-458b-a977-051cf2423d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.159, 0.128, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3851", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8dc7145a-abf6-4b53-94fa-c2a1348aab81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.347, 0.557, 0.387] in the image\nAnd my action is Action: TYPE\nValue: smith"}]}, {"id": "mind2web_3852", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_726bec92-5493-4eaf-ae53-ccf5041b29d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.359, 0.648, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3853", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_d1944090-239b-4c54-a478-91e6b01bdfba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.201, 0.393, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3854", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_fca5d423-f301-4411-b33c-f7956eae3a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.146, 0.699, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3855", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_de893c6e-8bae-40fc-ae87-01165ef350f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK\n[link] PURCHASE OR RENEW NOW \ue080 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.455, 0.112, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3856", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_dc327d5f-d07b-496d-8680-400483790fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.254, 0.653, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3857", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_dd7801cd-f2c0-4d87-a7fa-cc6d80723a61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.426, 0.876, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3858", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_f78b743a-0b12-4f1c-b33d-a1e29de080df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.29, 0.648, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3859", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_c88f469f-8d6f-4573-895e-f79f176a1c0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Divorce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.308, 0.559, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3860", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_454c214a-547b-4f92-bd1a-8c7af5315360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.215, 0.727, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3861", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_220eebf1-381e-490c-8f48-c96d8228e83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford\n[span] Abbotsford, WI -> CLICK\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.332, 0.125, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3862", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_788991d0-db86-4c7f-94b6-f19ffa1d7996.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[label] Month -> CLICK\n[div] Add guests -> CLICK\n[path] -> CLICK\n[button] increase value -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.078, 0.819, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3863", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_1c165a7e-dea3-4fb6-96f1-f52d15dc0ed7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.665, 0.93, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3864", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_d2e28992-f3bc-445d-b2a4-876daf96d479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Beyonce -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.275, 0.487, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3865", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_f5a5e597-4007-4c76-b1d0-69beef875a67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.242, 0.595, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3866", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_9783b6dc-cd33-4763-99c7-92b577797400.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.006, 0.675, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3867", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_660f5a50-1d68-4a30-a58b-25330fcabbe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.288, 0.326, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3868", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_2bd52759-721b-4129-a6e8-16877c8237d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK\n[button] Search -> CLICK\n[link] View details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.666, 0.931, 0.72] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3869", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7e967714-530e-4ee3-aab8-c8943a08b141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.829, 0.178, 0.843] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3870", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_ef11694e-c52c-41dd-bc90-a2ec7bc71e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele\n[b] ukulele -> CLICK\n[link] Tab -> CLICK\n[link] High rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.327, 0.523, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3871", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5a4a2016-b8c2-4a54-86c0-e69897f19172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[span] Bournemouth -> CLICK\n[generic] 30 -> CLICK\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.87, 0.273, 0.918] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3872", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_3bf8ca73-e41c-42b9-b642-7cf7743311ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[button] 04/11/2023 -> CLICK\n[link] 12, Wednesday April 2023 -> CLICK\n[link] Find Schedules -> CLICK\n[div] Earlier -> CLICK\n[generic] Press enter key to get details about this schedule -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.576, 0.875, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3873", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_2a12305f-5f27-4743-b696-61ca159e6fc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[label] Armenia -> CLICK\n[label] Austria -> CLICK\n[label] Belgium -> CLICK\n[label] Bulgaria -> CLICK\n[div] Popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.844, 0.095, 0.948, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3874", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_5485fe1f-9623-4530-be5e-76bf6dce88c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\n[link] New & Noteworthy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.219, 0.303, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3875", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_0acfa22e-2dfe-4d1f-b42d-93ddfd168334.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.24, 0.318, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3876", "image": {"bytes": "", "path": "./images/51fce1f7-566e-4ae4-89a4-dea4d0ec569e_552b53c7-b311-478a-9d2e-752a31e92556.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an available wine at Kroger.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.039, 0.249, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3877", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_d2848639-97f6-4e63-a587-3a55a70d0525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Start Journey -> SELECT: Train\n[combobox] End Journey -> SELECT: Bus\n[button] Done button - Press enter key to submit travel pre... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.287, 0.359, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3878", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_f2609a37-f14c-4a51-9474-cb3de2b81745.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.295, 0.473, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3879", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_ed2436f8-6375-4214-b4ff-64c690a30d12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.08, 0.578, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3880", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_f00798ea-e59e-4f62-8079-eeb0d52ac0a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.145, 0.84, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3881", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_f159143b-b456-45b0-8ac4-0a4c3a2a9f6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.137, 0.586, 0.165] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_3882", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_399cefa2-831f-4fc6-83da-87899078705b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.195, 0.329, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3883", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_47a6ae98-a2c3-4fbc-8a06-389316088503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.047, 0.866, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3884", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_0b3d6a70-f8fc-4541-80bc-ec18ed024db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.31, 0.339, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3885", "image": {"bytes": "", "path": "./images/e783cb30-26d8-45b8-b3d3-04570566bd32_aa129fe1-fa82-4007-80d5-c8700bb6dac4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Flight status on 21 April for flight number DL145.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[span] Mon, Apr 10 -> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.112, 0.562, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3886", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_7e41a150-71e0-49cf-9c81-2ab0101e943d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK\n[checkbox] Outdoor Seating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.513, 0.203, 0.529, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3887", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_cffd21a6-6348-47b9-a5f3-461b9532ad99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.351, 0.758, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3888", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_a921a26c-218a-4d0b-98bc-d5df89444762.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK\n[generic] Bike shop Pigeon Forge -> CLICK\n[link] See services menu for Rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.925, 0.646, 0.944, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3889", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_d09aa5b6-073d-4456-895a-50e397fb9f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.17, 0.406, 0.205] in the image\nAnd my action is Action: TYPE\nValue: car accident lawyers"}]}, {"id": "mind2web_3890", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_933b0ad5-f7f5-4195-96aa-530e47401fbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 
1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.55, 0.251, 0.562] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3891", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d9c1a3c4-2953-4441-b535-b0ae2fed6215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.23, 0.341, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3892", "image": {"bytes": "", "path": "./images/9365fba7-2698-4063-b151-dd0bd55e0f50_35eeeb9b-b19e-424f-a037-42daf164c207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the cruises that go from Miami Florida to The Bahamas\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.362, 0.285, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3893", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8230e92a-1a42-47f3-8884-891a159c10bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.528, 0.496, 0.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3894", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_395355a1-b038-4bc0-b846-7df25d07f4d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 400\n[textbox] Maximum Value in $ -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.336, 0.176, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3895", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_8971ff26-7b5c-4b17-be3d-006f780b3657.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.069, 0.416, 0.081] in the image\nAnd my action is Action: TYPE\nValue: developer"}]}, {"id": "mind2web_3896", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_8a6f950a-4bb4-4b36-b0e9-e2d45e8d69fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: New York JFK\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.269, 0.808, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3897", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5175d48c-cd70-4186-be31-ffda1afc9e9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.214, 0.977, 0.257] in the image\nAnd my action is Action: TYPE\nValue: abc@abc.com"}]}, {"id": "mind2web_3898", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_a0f73b91-7f84-4c1b-b00f-816a3592bea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK\n[checkbox] PURPLE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.581, 0.906, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3899", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_c0462513-58ef-424d-baba-92baeaac15cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.168, 0.641, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3900", "image": {"bytes": "", "path": "./images/d4f7da1f-2ed8-4110-8dea-35f07b7a1756_1b9b9aeb-9fc2-47b7-88ba-d9aa35bd748f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Begin booking a 14 day cruise from Los Angeles to Hawaii\nPrevious actions:\n[span] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.369, 0.568, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3901", "image": {"bytes": "", "path": "./images/fb7741f6-f388-4535-903d-d07315ea995e_7ee7d0b5-88a4-40da-9ffc-b863efa019a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find fitness events for this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.081, 0.939, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3902", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_3e5ef950-3ea6-411c-86a9-59318940c3aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.277, 0.658, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3903", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_396e747f-8d48-472d-9972-4fb76df776fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.282, 0.528, 0.327] in the image\nAnd my action is Action: TYPE\nValue: mexico city"}]}, {"id": "mind2web_3904", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_9f6a84e6-6b69-424c-8e55-1759affbedd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.345, 0.693, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3905", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_cb4e102c-6cbf-4b5b-ab7e-c6b1e6be700c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... 
-> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.609, 0.241, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3906", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_96aaffb2-02ba-4ac2-b804-6a30b524648c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.417, 0.373, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3907", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_52fd01f6-62fb-44a2-afa8-13ff5b1088cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.005, 0.995, 0.023] in the image\nAnd my action is Action: TYPE\nValue: gloomhaven"}]}, {"id": "mind2web_3908", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_8f5236d2-4814-46b1-8952-9fd67d4d4a13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK\n[button] CHECKOUT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.23, 0.628, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3909", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_52744503-3e70-4e58-9502-dfbd39ccbedc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 60538\n[link] Store Locator \u00a0 -> CLICK\n[textbox] Zip Code -> TYPE: 60538\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.042, 0.837, 0.058] in the image\nAnd my action is Action: TYPE\nValue: Magtag electric dryer"}]}, {"id": "mind2web_3910", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_aef60faa-2b24-4efc-9056-42572c18b68e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.442, 0.571, 0.46] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3911", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_0e94f861-5839-4768-8110-49739e46dfed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.319, 0.284, 0.34] in the image\nAnd my action is Action: SELECT\nValue: 45"}]}, {"id": "mind2web_3912", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_e93a3684-bb9d-444f-af6c-bacd809f740e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[checkbox] Tortillas (4) -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK\n[button] Increase Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.495, 0.99, 0.512] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3913", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_81ccfa2d-166d-495a-831b-1dbb94eff401.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK\n[link] Super Bowl -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.438, 0.97, 0.567] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3914", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_26e354fd-cad7-453f-a070-138d9ebc55ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Same as pick-up -> CLICK\n[textbox] Same as pick-up -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Pick-up date April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.494, 0.278, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3915", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b909fb5c-7fa3-4c7e-b535-b80693e60d34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.644, 0.812, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3916", "image": {"bytes": "", "path": "./images/f0f8088f-46dc-453a-b695-772b30421ece_26843443-9d32-4833-931f-cf80f8a6b542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trip for 123456 last name Smith.\nPrevious actions:\n[button] MY TRIP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.384, 0.112, 0.616, 0.123] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_3917", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_aae409a5-3510-4340-a48c-8f4040559538.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.729, 0.107, 0.739] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3918", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_2c12ecd9-ae60-4175-ba36-c56cd052b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK\n[link] Plan my Trip - Press enter key to submit the form ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.324, 0.332, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3919", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_8f8ea352-b937-46b3-aabe-c25ac19c884e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.176, 0.331, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3920", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_3ab91777-9f12-48f7-b203-03dc68f21c05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.074, 0.036, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3921", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c05241f0-b26e-40df-b388-6067f69ff404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.111, 0.902, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3922", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_2d197653-4bfc-436a-83be-5d50125fe4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK\n[checkbox] Honda (549) -> CLICK\n[checkbox] Civic (122) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.213, 0.888, 0.244] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_3923", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c8de24b2-8468-492f-bcc6-dfac28f0b19e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[button] April 20, 2023. -> CLICK\n[combobox] Party of 2 -> SELECT: 7 Guests\n[svg] -> CLICK\n[button] View more availability for Canal Street Eatery & M... -> CLICK\n[button] 2:00 PM Eatery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.36, 0.523, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3924", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_34f58f02-a3b6-4e7d-a1b8-03b2370311ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.04, 0.749, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3925", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_21dba24e-8f7e-4330-b7ef-66a664eb3024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.033, 0.553, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3926", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_1bba89d9-59d9-446b-8e59-2ff15fcb4302.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[link] Flights to popular destinations -> CLICK\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK\n[textbox] Budget. Please enter a numerical value -> TYPE: 1300"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.533, 0.793, 0.545] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3927", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_8ef7b552-971c-4c7e-b142-a295424b5e0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.354, 0.553, 0.477, 0.577] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3928", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_6a110bc2-e04c-4274-b0b5-4b40194e6780.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.328, 0.426, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3929", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_ce22d6a6-4223-44ce-83fd-889b45b45818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.022, 0.426, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3930", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_93a58437-0259-4b0a-b430-74597d880feb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] All dates -> CLICK\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.172, 0.892, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3931", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_6f8808c4-2c04-47c8-b464-b6fa5494f4d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.044, 0.248, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3932", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_55f783b6-bbd7-44aa-b83b-cf2fa14ef9dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[img] -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK\n[img] -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.23, 0.984, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3933", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a87b377d-8822-4c90-8f14-11e689d3aacf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.226, 0.562, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3934", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_f7490aaa-f8c1-4fb4-b36b-2f6c2be04574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.011, 0.369, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3935", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_5c56aa6b-095e-4946-8cce-398de16ac7e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK\n[link] OPEN 24 Hours -> CLICK\n[link] Default -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.292, 0.683, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3936", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_8964f24a-8d05-46b5-a096-d8fe1fec006d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.075, 0.355, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3937", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b5b40650-0447-4f2a-adbc-15b3faf5babb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.303, 0.463, 0.337] in the image\nAnd my action is Action: TYPE\nValue: SMith"}]}, {"id": "mind2web_3938", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_4dee51f3-114f-4ef5-924f-25e997581347.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\n[link] NBA . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.126, 0.47, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3939", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_7521f3d0-1427-4e25-b646-ae267d83d1ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK\n[img] AIRism Cotton Graphic Short-Sleeve Raglan T-Shirt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.18, 0.803, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3940", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_c326a9f7-8f91-48da-b17e-8fa200ceabef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.261, 0.312, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3941", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_33e7bd87-795d-44da-81be-390346b9829c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.116, 0.691, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3942", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_006fd607-bc1b-4a63-a6b6-49ce4554b83b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.361, 0.474, 0.391] in the image\nAnd my action is Action: TYPE\nValue: stoney brook"}]}, {"id": "mind2web_3943", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_0a8c70e6-c258-4a5d-90e4-9b1e497ecd19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK\n[link] BGG Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.068, 0.376, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3944", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_10995afb-c7d3-4055-b7eb-853178f8205a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.268, 0.245, 0.297] in the image\nAnd my action is Action: TYPE\nValue: San Francisco"}]}, {"id": "mind2web_3945", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_d4376ba0-aefd-4d57-a775-a3cb687627e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.102, 0.145, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3946", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_aef3f78b-c01d-4cad-b931-cf7360857b74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.909, 0.194, 0.991, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3947", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_bb98aed0-a7f0-492a-95e4-623bdc9edf55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival\n[textbox] ZIP -> TYPE: 11101"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.728, 0.136, 0.947, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3948", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_16006817-5fb0-425b-80e7-16d8eda37863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.006, 0.393, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3949", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_600847c5-d8ae-4f6b-96a4-c247607440b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.325, 0.359, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3950", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_fc0499e3-1006-4cbb-ad05-f2530c0915e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.006, 0.31, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3951", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_0b8fc837-695c-4373-a0fc-9a01cb61535a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.053, 0.082, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3952", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_ba1ffe14-ee2a-4736-96d7-b3c1d5f6f99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.391, 0.474, 0.432] in the image\nAnd my action is Action: TYPE\nValue: Breakneck ridge"}]}, {"id": "mind2web_3953", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_c6dbb23e-ae8c-4d6a-94e6-58ea0df2339e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.373, 0.339, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3954", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_e44d2293-b6f9-4388-b87b-f11c66219504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. 
Fargo, ND 58102\n[textbox] Shipping Address -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.599, 0.861, 0.617] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_3955", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_7335eea8-a7a4-4655-85a4-67ac3a93642a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.466, 0.72, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3956", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_58afded5-ed7e-4bc6-b0c7-b83e4bfb4234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.405, 0.163, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3957", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_69b8b5e3-07de-49f3-a2dd-149dcd1bef3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK\n[button] Ship Location: Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.168, 0.535, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3958", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ac7a36f2-839a-4c24-bda8-118aa8ec52be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Flights + Cruise -> CLICK\n[button] Search flights + cruise External Link should open ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.143, 0.968, 0.154] in the image\nAnd my action is Action: TYPE\nValue: ATLANTA"}]}, {"id": "mind2web_3959", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_4d3c65b7-8e9c-4bb7-9347-708aaba58996.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.573, 0.846, 0.61] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3960", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_d4beccf5-98e8-4e29-9e4a-f6f38a31e064.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.128, 0.553, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3961", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_5f4295e1-0830-4af2-a782-84396e3d8a0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.09, 0.188, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3962", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_f14a0102-32f0-44f4-8a4f-28a04f537807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER\n[a] Trending -> HOVER\n[link] Beyonce -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.183, 0.255, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3963", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_1c772b71-1382-4a17-9f3c-fd3cf0115d5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.572, 0.796, 0.606, 0.803] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3964", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_fbfca76a-4bd4-4f4e-a063-acbbe70803c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\n[button] Shopping -> CLICK\n[link] BGG Store -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.102, 0.233, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3965", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_0e89a5cb-a2b2-43a0-8c4e-9c4962e3096c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.197, 0.561, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3966", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_d1701f0b-aad7-4ffa-ac74-2ced4ccbf481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: thomas.neo@gmail.com\n[input] -> TYPE: Anderson\n[input] -> TYPE: po box 2846\n[input] -> TYPE: Texas\n[button] Complete -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.231, 0.664, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3967", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_148c30a5-ecb3-409a-a1b2-610d4b504d8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.061, 0.191, 0.082] in the image\nAnd my action is Action: TYPE\nValue: 25504"}]}, {"id": "mind2web_3968", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_2dcd2fcc-3dd2-4ca1-bedd-d70892c4f393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK\n[button] Select fare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.35, 0.727, 0.506] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3969", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_837dcc3a-4018-485b-a267-89a4c738349a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK\n[link] BEDROOM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.301, 0.4, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3970", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_833d7854-cd6b-4f03-ba0c-a4c72e905d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK\n[link] Guitar Pro -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.428, 0.97, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3971", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_3d3cf273-c147-48b7-8ac7-0e2f84ccfc4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.207, 0.72, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3972", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_62135e6a-b2f3-46bf-b769-ba492e7b2c13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.388, 0.944, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3973", "image": {"bytes": "", "path": "./images/ef09c913-747d-4256-bd51-2b91540c5222_6d6f7cec-62f9-470d-bec2-d7867d662dba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find map for the KCCR airport\nPrevious actions:\n[input] -> CLICK\n[input] -> TYPE: KCCR\n[option] \uf041\u00a0Buchanan Fld (Concord)\u00a0 KCCR CCR \u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.112, 0.45, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3974", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_3607097c-cce3-4597-9d98-6882d4f5621e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.26, 0.059, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3975", "image": {"bytes": "", "path": "./images/64051efe-53dc-4e79-9980-c3d75d34c4aa_f64548ad-02e9-4c42-af4c-240a494202e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my item inventory.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.0, 0.519, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3976", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_0914fa6f-323a-4498-9742-0b1fad40a9bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.054, 0.727, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3977", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_f4439995-85b8-459b-b852-f64741f39d39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK\n[checkbox] Engineering (41 items) -> CLICK\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.547, 0.415, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3978", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_7dc2e9bb-ea97-4cc9-8824-aaf9e70f74d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.61, 0.704, 0.661] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3979", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_873ad00e-a6df-4834-ad94-1f8d537ee77c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK\n[link] Full Team Statistics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.338, 0.223, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3980", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_75c2b629-cdff-4b3e-820e-15b047009f95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.064, 0.498, 0.299, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3981", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3d51e444-a3b2-4aaf-abaf-dde9346fdd65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.017, 0.345, 0.028] in the image\nAnd my action is Action: TYPE\nValue: 44240"}]}, {"id": "mind2web_3982", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_a14dcd4a-847a-4935-9708-4dda2e60137a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.432, 0.838, 0.47] in the image\nAnd my action is Action: SELECT\nValue: Queen"}]}, {"id": "mind2web_3983", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_6a84d789-0284-4b64-9412-96091f7b7a32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.032, 0.139, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3984", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_e6c9df9f-b289-4bb9-8552-e6367e9e3057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.484, 0.375, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3985", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9204f384-9544-4d77-abbd-a69960ef3360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.456, 0.284, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3986", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_03fa7043-d0b4-486d-846d-27c2930cf768.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 0.151, 0.388, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3987", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_1549bfcf-8eef-4ed1-bb64-9f74c32b7be0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK\n[li] Neighborhood -> CLICK\n[link] Midtown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.267, 0.218, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3988", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8b5bb82d-a7cc-4864-a16f-ed089b55f45b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.081, 0.587, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3989", "image": {"bytes": "", "path": "./images/dd38e3f0-242c-4ef2-b4d9-bd13adc675b7_c116fa0e-3a0a-498f-a57c-bb6427441019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with download options for fantasy apps.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.048, 0.14, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3990", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_ed8cb3d3-6c92-4bc6-a927-ea4a11ef074d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.241, 0.233, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3991", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_b8a214f6-b3a1-41b0-997b-d341e62a8bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK\n[div] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.349, 0.76, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3992", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_deeff052-6db8-4239-be1e-1939ba33fe3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] Color -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.021, 0.981, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3993", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_6caa49b0-990d-4fdf-8534-bc3e4e6ab8d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.392, 0.234, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3994", "image": {"bytes": "", "path": "./images/4c578076-b877-4097-bf67-e231e349d56f_e8bf99cb-e1b5-4b35-9c26-9bc74fcd3cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of most popular upcoming game releases.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.059, 0.31, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3995", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_5e85f39c-3a4a-40aa-8120-4491ff59cbc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.167, 0.613, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3996", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_2cde0581-1919-4200-9358-c3d15bd24028.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK\n[link] Explore Climb -> CLICK\n[link] Shop all climbing gear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.381, 0.428, 0.409] in the image\nAnd my action is Action: SELECT\nValue: Price High - Low"}]}, {"id": "mind2web_3997", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_43648ae0-85c2-474e-a170-3220f5ffa6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.054, 0.464, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3998", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_5065da71-9a45-4c92-8cd3-8e15708647a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\n[link] Library -> CLICK\n[link] History -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.647, 0.093, 0.721, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_3999", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_0f483551-50fe-4653-8fac-ed575e420118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.269, 0.316, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4000", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_43f0a0c4-1c11-406e-b5e6-38cdca83e896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.359, 0.744, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4001", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_a5959020-fb70-4c99-b3bd-4e1ca12b85c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SEARCH CRUISES -> CLICK\n[button] Number of Guests -> CLICK\n[path] -> CLICK\n[path] -> CLICK\n[button] Ships -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.553, 0.158, 0.654, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4002", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_2b9925ab-a059-47a3-9bcb-4007b10f734a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.254, 0.764, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4003", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_070bbaab-3707-41c9-b426-8c019877cacd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[button] Next -> CLICK\n[button] Next -> CLICK\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.187, 0.905, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4004", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_e1032b62-f375-4745-9278-9923947deba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK\n[option] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.239, 0.536, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4005", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_5f031bff-a772-4cd4-a912-b6d83a0c0d7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... 
-> CLICK\n[link] 21 April 2023, Friday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.049, 0.934, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4006", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_5bbb896b-8564-4603-9fc7-16ef2a072d56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.06, 0.719, 0.082] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_4007", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_700fb498-5bab-4e61-9e6b-6f3679b2ca2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\n[textbox] Search -> TYPE: Tom Hanks\n[p] Tom Hanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.192, 0.52, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4008", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_230628ec-5182-4284-8bc5-a4bf221832e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22\n[button] Vehicle Class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.093, 0.341, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4009", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_da776b38-bc28-4400-92f8-870b52637a55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.654, 0.094, 0.663] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4010", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_f4364245-326b-4e7b-8712-6a5189987c6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.161, 0.628, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4011", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_7aae967a-36c3-48e3-9d72-b741fa568806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Please type your destination -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.225, 0.409, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4012", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_2b36eab1-cf61-497e-92cc-c454fb7c4aaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas\n[option] Las Vegas Nevada,\u00a0United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.217, 0.483, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4013", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7c8aca4c-f6b7-4812-a730-34902fbf8b54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.264, 0.41, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4014", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_0245d291-b55c-42cc-9700-3869687e0b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK\n[searchbox] Search Routes -> TYPE: 4"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.156, 0.866, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4015", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_7e18cfda-a3ba-4519-b4b9-4c0b79083f6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.041, 0.661, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4016", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_4325202f-ddfb-4f0d-9997-9e911bd2079a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.205, 0.831, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4017", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_6777b49e-bd42-42a4-83c8-6924d950ddec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[searchbox] Search make, model, or keyword -> TYPE: jaguar xf\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.359, 0.236, 0.388] in the image\nAnd my action is Action: TYPE\nValue: black"}]}, {"id": "mind2web_4018", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_bc8d44c6-bcd3-4cb2-8d4e-f7e33a3a71fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 11231"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.09, 0.948, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4019", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_1d55f87c-4f09-44d6-a9e3-9b9809a3584e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.064, 0.036, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4020", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_1c9cfff8-1391-492c-8dc1-3071bbfe0f64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.117, 0.765, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4021", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_685aa3ad-c42f-4765-92f4-8f2ee0b62d85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[textbox] Vehicle Price -> TYPE: 10000\n[textbox] Down Payment -> CLICK\n[textbox] Down Payment -> TYPE: 500\n[combobox] State -> SELECT: New Jersey\n[combobox] Credit Score -> SELECT: Challenged (< 580 FICO\u00ae Score)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.371, 0.459, 0.396] in the image\nAnd my action is Action: SELECT\nValue: 72 months"}]}, {"id": "mind2web_4022", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_ea265149-8907-442c-97ef-ed3136183634.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK\n[link] One-Pieces -> CLICK\n[img] Ribbed 1*1 Long-Sleeve One Piece Outfit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.696, 0.906, 0.746] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4023", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_d6eb2826-502e-4acf-b43c-29e25ace5847.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.084, 0.42, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4024", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_d1178e23-83d4-4c13-8b20-31c9cf3a4166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.389, 0.645, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4025", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_a0fe253d-3df0-4239-b233-b9f03863b5b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.287, 0.623, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4026", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f6e611c9-ad21-49ca-a841-7ad529b56c95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.103, 0.987, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4027", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_06873a11-d752-4bbb-98ad-c892947fbbc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Leeds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.077, 0.326, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4028", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9dc761aa-1a13-4905-a922-726a3f469738.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station\n[link] T red line silver line commuter rail Zone 1A Sout... -> CLICK\n[combobox] Enter a location -> TYPE: north station\n[link] T orange line green line D green line E commuter ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.287, 0.863, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4029", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_4ca00eeb-f8c1-4324-9f21-78059e35b12e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.045, 0.384, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4030", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_2d74ad4e-f2c3-492b-8a78-ae86a999f90e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.575, 0.609, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4031", "image": {"bytes": "", "path": "./images/c2e4800e-684f-4bb6-99ab-782806c8776d_1e652aec-cd7e-4681-824e-ead22c58c1e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for McDonalds located in Greenvill, SC.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Greenville\n[span] Greenville -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: McDonalds"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.02, 0.62, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4032", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_5631a528-35ea-425a-acb7-41c0fa888737.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.155, 0.855, 0.195] in the image\nAnd my action is Action: SELECT\nValue: 4 guests"}]}, {"id": "mind2web_4033", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_cf9d27ef-6bc2-4be0-a3e9-39527c596408.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK\n[link] 2 -> CLICK\n[combobox] Pick Up Time -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.281, 0.891, 0.324] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_4034", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_11129f78-0976-4251-bc55-5dc77032e1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.162, 0.257, 0.391, 0.278] in the image\nAnd my action is Action: TYPE\nValue: Roy"}]}, {"id": "mind2web_4035", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_966832cc-a997-4288-a3ad-1d8a567483a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.194, 0.713, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4036", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_c8e636ed-095c-4824-ba93-64541480befc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.093, 0.902, 0.113] in the image\nAnd my action is Action: SELECT\nValue: 2 00 PM"}]}, {"id": "mind2web_4037", "image": {"bytes": "", "path": "./images/61c8e051-a847-4424-9d8b-b8bc2c134a35_a008646a-eed9-4a5d-ba30-872e8775a109.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the recent trades\nPrevious actions:\n[button] Shopping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.07, 0.42, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4038", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_bb78dc80-dcd2-4a33-bc62-fba8db2989f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] North Las Vegas -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.14, 0.846, 0.177] in the image\nAnd my action is Action: SELECT\nValue: 11 00 PM"}]}, {"id": "mind2web_4039", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_2dcf1f5a-3eb9-43e5-9f31-661189ad71d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK\n[button] Customer Rating -> CLICK\n[checkbox] & up & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.034, 0.378, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4040", "image": {"bytes": "", "path": "./images/6a56a1fb-f201-4bf9-b225-842181920388_45786006-20a8-4fb4-b399-e07ae8d308d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show events in Los Angeles and select the one on the Friday this weekend.\nPrevious actions:\n[button] CITY GUIDES -> HOVER\n[link] Los Angeles -> CLICK\n[button] All dates -> CLICK\n[link] This weekend -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.355, 0.941, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4041", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_d4384cf9-8eba-4b9b-8cf6-fedec53fe0db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: washington\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.32, 0.877, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4042", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_622461a1-47ff-4c2c-b95e-05e62fa43a01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.229, 0.929, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4043", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_ed7f9c58-052e-4dd7-9452-9e017dc53f6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK\n[label] All Regions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.423, 0.269, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4044", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_d5e4afb2-e893-4df1-a0de-9602b4a381c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[textbox] First Name -> TYPE: Carla\n[textbox] Last Name -> TYPE: Cahill\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.283, 0.671, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Michael Cahill"}]}, {"id": "mind2web_4045", "image": {"bytes": "", "path": "./images/56e4a9c1-d39e-4173-9857-9cf980c0fba9_80f11b1b-c7f8-4ad7-be9d-68556e06ba5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow the Denver Nuggets NBA team.\nPrevious actions:\n[link] NBA . -> HOVER\n[div] Denver -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.459, 0.179, 0.527, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4046", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_5e1eaaa5-5b32-4ef7-8a06-5934987e804c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.008, 0.31, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4047", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_3b858472-560e-42e4-9f9a-d3134e1e2f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.001, 0.288, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4048", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_10d3f0c0-5536-4a81-809e-3a9b1bd98b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: london\n[span] London -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.184, 0.481, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4049", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_53fd9509-ec94-4760-82c5-afdc92e45ba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.0, 0.291, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4050", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_1361d84a-7104-44ca-a6d3-373efea244df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.153, 0.892, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4051", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_2c9a65ce-531f-4010-b149-949ce3004142.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Casino -> CLICK\n[checkbox] Restaurant -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.572, 0.089, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4052", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_70093fcf-8cdd-4fd6-acd9-a4ba14673610.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK\n[radio] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.237, 0.609, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4053", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_197bb442-771f-4fff-84e1-cef8b3978bd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.008, 0.1, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4054", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_66caa56b-9a19-485d-ad22-cbb39fda106a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.125, 0.147, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4055", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_916fed28-a897-4397-bbb1-6829346f320d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK\n[button] Customer Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.432, 0.137, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4056", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_8254ee13-e78b-4f68-8a4a-f3b80026d454.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK\n[link] Uncharted: Legacy of Thieves Collection - Wiki Bun... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 0.657, 0.244, 0.668] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4057", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_e78aa8b5-4d2f-4aab-a13a-e7a4d0be9428.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Hawaii Vacations -> CLICK\n[generic] Departure date input -> CLICK\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.226, 0.509, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4058", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_dee8a727-5865-49e4-b498-d1e5742c704e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK\n[link] Flatware & cutlery -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: san diego"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.091, 0.948, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4059", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_b5bf6287-38d3-4152-9941-e345eb0396ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.125, 0.13, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4060", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_2fb8c104-cff9-426a-848b-db783f818ab8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.125, 0.418, 0.164] in the image\nAnd my action is Action: TYPE\nValue: los angeles"}]}, {"id": "mind2web_4061", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_2fef2ebc-4457-4de8-a2b1-20a39a197b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.04, 0.263, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4062", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_f6508bab-5a51-44f5-abd0-fa6863f8d1d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[button] 12, Wednesday, April 2023. Available. Select as ch... -> CLICK\n[div] Add guests -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.105, 0.819, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4063", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_8c53712e-89d1-46d1-bfe6-d2ace827c9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.58, 0.157, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4064", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_5e6fe782-07c7-4444-b163-1b8063d3aafb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.202, 0.325, 0.247] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_4065", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_73c9f429-644c-4c67-a7fe-47f68f350c36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.064, 0.479, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4066", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_cbfcbc6c-9ccd-4e7f-8376-6ec56fbf2469.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City\n[div] New York City, NY -> CLICK\n[textbox] When? -> CLICK\n[li] Spring -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.008, 0.82, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4067", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_54995115-8314-40d1-bdb1-564538ecd6f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.252, 0.699, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4068", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_e2b1b2d9-dacd-4da6-9c79-fff51e9fd7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_4069", "image": {"bytes": "", "path": "./images/fbefeb82-f3e6-4db4-acc6-a68b1d519ba7_30604de5-f631-477f-96ac-daa281fcef83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a parking button code for my company Boston Legal located near Boston Navy Yard, and the button should be 160x400 pixels, and the parking spot should be within 15 miles of my address.\nPrevious actions:\n[link] ADD PARKING TO YOUR WEBSITE -> CLICK\n[textbox] STEP 1: Enter Your Company Name -> TYPE: BOSTON LEGAL\n[combobox] STEP 2:Enter an address to search and select a loc... -> TYPE: BOSTON NAVY YARD\n[em] Navy -> CLICK\n[span] 300 pixels x 150 pixels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.691, 0.493, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4070", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_b308cce5-d50d-4080-abf6-23523051267b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM\n[combobox] End Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.353, 0.3, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4071", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_b99de965-6c3b-41f8-af69-0188a1db8435.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.093, 0.917, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4072", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_0d72fff6-d3e1-4d08-9fa3-ecd760e525fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.057, 0.882, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4073", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_fbd56342-ff93-4a36-92a4-b463d0d1c9c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456\n[textbox] last name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.323, 0.94, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4074", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_d94ad8e6-7e5e-4aa8-a2db-c7f469e82776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK\n[button] Check-out April 25, 2023 -> CLICK\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.2, 0.571, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4075", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_22f40101-359f-4039-b0cb-fde2895aadc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.272, 0.371, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4076", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_0495ec9b-5a3c-4d1f-9f95-7384ed92414d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.191, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4077", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_b0d38385-d24a-44e4-9d2c-8083d639762f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] LKR\u00a01,120,521 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.3, 0.326, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4078", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_c2d435f9-82ee-451a-b32e-6045541e4c48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 3 the movie.\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.174, 0.677, 0.203] in the image\nAnd my action is Action: TYPE\nValue: Guardians of the Galaxy"}]}, {"id": "mind2web_4079", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_4aabad19-31dc-4141-a99c-6d665544a782.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER\n[link] Leagues & Cups -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.44, 0.168, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4080", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_061388be-251a-4437-b7a2-8f6cd7bfcbb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... -> CLICK\n[link] Add\u00a0to\u00a0Cart -> CLICK\n[checkbox] I agree to Sales & Transaction Policy and Seller ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.285, 0.665, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4081", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_d9cda9b4-abee-42dd-9dee-81f0f2d76601.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.59, 0.266, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4082", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_67703df2-19ca-4bac-b1d6-272be445bcf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.479, 0.059, 0.509, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4083", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_51df100c-08cd-426d-839f-fce05efbf3c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.374, 0.567, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4084", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_3fb76e56-0be0-4742-beb1-49587ce945aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Beds & mattresses -> CLICK\n[link] Mattresses -> CLICK\n[img] Foam and memory foam mattresses -> CLICK\n[button] Show sorting options modal -> CLICK\n[span] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.281, 0.255, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4085", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_12b569ac-fd66-4dc8-a875-f3542d60c848.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... 
-> TYPE: La Bergamote"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.255, 0.894, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4086", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e3b859b3-3158-4566-bb2e-e81319206a73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.126, 0.712, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4087", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_eef7bfcb-4ff5-42d2-b573-855aa991eb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.311, 0.834, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4088", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_210df914-bbcb-4529-9054-666734af4cc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.335, 0.679, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4089", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_1ea88ab5-b80f-4656-8554-af68a9752d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.184, 0.344, 0.205] in the image\nAnd my action is Action: SELECT\nValue: Colorado"}]}, {"id": "mind2web_4090", "image": {"bytes": "", "path": "./images/0ad8d621-8f1a-4fc2-adf9-a78d363ebd3a_3d393bea-c440-4777-9e40-6d7d9bc4fac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the most recent NASCAR driver news.\nPrevious actions:\n[link] NASCAR Cup Series NASCAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.936, 0.039, 0.978, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4091", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_fb1207f7-703d-411a-8128-546df580dbe2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[link] Tickets & Passes \uf078 -> CLICK\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK\n[menuitem] Meal Deals -> CLICK\n[menuitem] All Season Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.054, 0.266, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4092", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_01301e65-56a9-4d31-8d3e-ce354d6fa71a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK\n[button] \ue908 Depart and Return Calendar Use enter to open, es... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.36, 0.591, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4093", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_b75c9c94-7ef6-4420-bb81-33661e5e430d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK\n[div] 13 -> CLICK\n[button] Search -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.64, 0.815, 0.665] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4094", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8887cd5c-e8b7-419e-b48b-3fcb1e46fd82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.372, 0.874, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4095", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_5aa47c09-f306-4e03-b55b-95b304d7a729.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[searchbox] Search Site -> TYPE: motherboard\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.294, 0.701, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4096", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_c8346f41-3686-4039-b18c-40eb8b76516b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.216, 0.553, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4097", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a57e5ed1-2c94-4dc6-b280-6d75b63a3eea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.357, 0.565, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4098", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_de5428ef-b45a-4d20-ac2b-f8e854db520c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.107, 0.938, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4099", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_78c4087b-0f97-4920-8529-834cdb618baa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.122, 0.143, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4100", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f55faaf5-acea-484d-be37-0cc18774f094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.821, 0.226, 0.838] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4101", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_943474ad-4378-4912-9784-e64ea2b22a7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? 
-> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.415, 0.68, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4102", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_2a3fa5ea-4980-48d8-974f-86b5b0a904d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.078, 0.783, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4103", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_df3abd68-bd67-4399-a450-33a89f3e7929.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford\n[div] Abbotsford -> CLICK\n[button] Find my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.256, 0.403, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4104", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_9f96c9e1-3fd7-4fdc-be01-3c98192d9cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.095, 0.719, 0.131] in the image\nAnd my action is Action: TYPE\nValue: detroit"}]}, {"id": "mind2web_4105", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a8fa1979-765c-48b2-9f63-f931c7c44900.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.22, 0.238, 0.277, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4106", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_1a91c6ec-4978-47c9-8bf7-8f15d6a78b47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\n[link] Shots -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.084, 0.567, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4107", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_74456eba-7768-489b-838d-3f49d90d29b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.183, 0.693, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4108", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d796ca41-ff49-434e-9980-14d8b156e4c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.213, 0.763, 0.228] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_4109", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_3559cfdd-31d0-481e-9598-8b9b8f75aa31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK\n[link] Mirrors -> CLICK\n[link] Wall mirrors -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.253, 0.969, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4110", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_34656a89-2cb1-4e13-b63b-9d643eece29e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK\n[span] Toll Rates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.535, 0.617, 0.619] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4111", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_f22dc09a-f72a-46e8-b245-40fa16163f84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.276, 0.891, 0.311] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_4112", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_0c7057f1-7639-49e3-8429-720608a24422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.9, 0.387, 0.959, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4113", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_c3548b02-cec7-474f-bce8-7e280432e230.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arive at 9:45 AM.\nPrevious actions:\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.574, 0.209, 0.61] in the image\nAnd my action is Action: SELECT\nValue: 9"}]}, {"id": "mind2web_4114", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_08dff165-73d4-4827-8dfd-92aee651a914.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[textbox] Search by SKU, Model # or Keyword -> TYPE: Magtag electric dryer\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.396, 0.104, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4115", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_dbb56b26-c531-4672-9299-555c711b8688.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] FISHING REELS -> CLICK\n[link] Power Assisted Reels (6) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.301, 0.074, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4116", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_61dea86d-e842-4be0-b179-ec76381b455e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.272, 0.198, 0.292] in the image\nAnd my action is Action: SELECT\nValue: 2018"}]}, {"id": "mind2web_4117", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_64bd7423-2284-491d-be4f-1c12ee2eaab0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.202, 0.771, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4118", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_45607f84-2528-47d1-b276-81c3a6d51f07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.189, 0.894, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4119", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_390be8cb-8c11-4011-8a91-b0eb8dffe25c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.489, 0.158, 0.498] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4120", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_3ea20727-aaf8-408f-91bf-7dd93234a5bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.006, 0.686, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4121", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_82517181-c0b9-44b8-99db-a12fe6acd05f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.149, 0.316, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4122", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_d6620023-7972-4e34-8818-7f7a51768f61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.15, 0.453, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4123", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_e3b492b2-86e6-4b47-b744-aa81675abad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.361, 0.715, 0.577] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4124", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b8c12588-7323-4532-ab73-d2a388e1fa4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] select to navigate to New -> CLICK\n[img] Missing (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.229, 0.492, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4125", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_980113f4-2f0a-4f86-bb2f-143710c7653f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.218, 0.285, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4126", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_a3af5576-db64-4685-bb1e-df34b324f361.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.467, 0.158, 0.53, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4127", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_8c524439-5c1d-44a2-842d-14d4cf92a4c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.371, 0.168, 0.552, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4128", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_6fa44411-fae8-427a-a086-b687187d19a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.288, 0.24, 0.332] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_4129", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_effb76da-8083-4512-999b-1c3c41b8d5a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.024, 0.553, 0.044] in the image\nAnd my action is Action: TYPE\nValue: mens hiking shoes"}]}, {"id": "mind2web_4130", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_b20c7f83-1126-476a-bc80-04de993b895d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK\n[div] Price (Low to High) -> CLICK\n[label] In Stock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.468, 0.222, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4131", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_22dbddd3-037a-4ffc-8622-f0181c16c949.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.06, 0.386, 0.07] in the image\nAnd my action is Action: TYPE\nValue: bananas"}]}, {"id": "mind2web_4132", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_fb716bd0-0cb0-4d20-b64c-5c603a5af0e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.278, 0.966, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4133", "image": {"bytes": "", "path": "./images/b49f88ac-b407-41af-a9e7-f5e4cd1f970a_b60d6d8e-b331-4d00-945a-f2a2a29926a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the rating and user reviews for the game \"Deathloop\".\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Search -> TYPE: Deathloop"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.199, 0.677, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4134", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_8de0346e-4043-48dd-b59e-01e8edd713df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK\n[button] Done -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.803, 0.263, 0.815] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4135", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_e4fd0574-b204-4e03-bb92-4ece87b183d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.568, 0.501, 0.577] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4136", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_4f966b9f-2163-4b2a-88cd-500239870dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.123, 0.652, 0.154] in the image\nAnd my action is Action: TYPE\nValue: Miami, FL"}]}, {"id": "mind2web_4137", "image": {"bytes": "", "path": "./images/60bfb72f-e7a5-414f-990c-8ddd569744f4_3904a380-086e-454f-aacf-140c31c9974b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the top restaurants in Boston to reserve for april 22?\nPrevious actions:\n[path] -> CLICK\n[button] Boston -> CLICK\n[link] {{ 'see_more_label' | translate }} {{::list.info.n... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.118, 0.048, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4138", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_8442397f-7a50-4c98-b836-3c9da40df35f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Los Angeles\n[span] , CA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.068, 0.661, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4139", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_a4b6efac-e32f-478c-9177-28e49d7ac7de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.105, 0.984, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4140", "image": {"bytes": "", "path": "./images/7ad37a91-c4c2-42d6-9b87-1f3e6a81647e_5ea6f7f1-9226-40bc-921b-fbaba9cc580a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check reservation availability for an Indian restaurant with price below $30 in East village\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.467, 0.518, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4141", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b18889ca-80bd-49a5-a847-ed799ac183bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.156, 0.312, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4142", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_8f4f2684-c054-41e1-aa3e-7ddd71a11026.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK\n[option] 2:00 pm -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.198, 0.241, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4143", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_4093dcf4-7a5e-49f9-8eb7-adf5db810557.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.125, 0.177, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4144", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_a6242145-4277-493b-86f0-175a233fea76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\n[button] Experience -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.115, 0.171, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4145", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_578fb390-89d1-4041-a5a8-867b7d55b182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[textbox] Where to? -> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.389, 0.942, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4146", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_b6d33f4e-d09e-4d10-ac72-98ddab6a40ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] Pet-Friendly -> CLICK\n[button] Road Trips -> CLICK\n[button] Last-Minute -> CLICK\n[button] $ Price: Low to High -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.205, 0.984, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4147", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_cc81bedb-0054-414a-873f-dc03997bd360.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.272, 0.956, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4148", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_478e55a2-a3ab-4f80-b442-3f6c356c95f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.019, 0.564, 0.03] in the image\nAnd my action is Action: TYPE\nValue: Concord"}]}, {"id": "mind2web_4149", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_055baae4-1d23-47fc-afe9-c93f39a6ceb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Albany, NY\n[div] Albany, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.211, 0.568, 0.23] in the image\nAnd my action is Action: TYPE\nValue: Bloomington, NY"}]}, {"id": "mind2web_4150", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_174d81fa-298e-4062-bc37-7e88037a43d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.082, 0.332, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4151", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_09b385ea-8bba-47d6-bdc4-1f42b4cfe5f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.022, 0.348, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4152", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_14157082-7e93-422c-9c85-b1595ec919f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.181, 0.664, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4153", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_51cf2cc6-26b8-45fd-b9b8-eea01ca732a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.168, 0.073, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4154", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_4ce5adec-f536-4f51-9dc7-4867949f1c20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: new york\n[span] New York -> CLICK\n[generic] Apr 6, 2023 -> CLICK\n[option] Apr 7, 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.621, 0.892, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4155", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_b9b3f9b0-6440-4894-b23c-6be659a69df5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.337, 0.384, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4156", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_a43d3350-8e3a-4d40-b1cb-fba874d15c92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.565, 0.29, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4157", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_a2a4a0ae-d58f-4e0c-9c4b-84c36e89dbe8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[option] Udupi Karnataka,\u00a0India -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.415, 0.824, 0.461] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4158", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8d8d0373-60d3-481b-8aa8-41c5cd2de300.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK\n[generic] Thursday April 20th -> CLICK\n[div] 23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.944, 0.193, 0.963, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4159", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0d5bf127-c6f5-4d6a-91ee-7a365759f335.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[button] Search -> CLICK\n[button] List of search results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.045, 0.144, 0.309, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4160", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3fd67889-8ab4-4640-b382-b8491611e103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.051, 0.491, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4161", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_ab228503-2ac5-4989-b2bb-57db3bf18fc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK\n[link] Discover -> CLICK\n[textbox] From Autocomplete selection. Enter your place. 
-> TYPE: doha\n[strong] Doha -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.216, 0.748, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4162", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_828390d5-8a45-4d89-af60-3ced4439f066.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.396, 0.277, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4163", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_a52e0685-b9cd-429b-af8f-e1a9a994a2c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.047, 0.382, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4164", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_11ef7ceb-4c64-4d8c-a2f9-8cfb1874d942.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.12, 0.704, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4165", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_61ab2c90-12d3-4294-96d2-bd79d9ee8181.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.058, 0.523, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4166", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_90fb63c3-5f3d-4c14-9878-f5ce0458bb6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK\n[link] hip-hop -> CLICK\n[gridcell] Clint Eastwood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.558, 0.639, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4167", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_fae30fbd-7fbe-4be2-a718-1695f357385d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[button] -> CLICK\n[div] White Water Rafting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.147, 0.963, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4168", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_57b2efcc-12bd-437b-973a-1c5fd75e39db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.289, 0.957, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4169", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_1e146ce2-ae82-47e6-91ed-0f36ecc4b61d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK\n[link] get tickets for Creed III -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.049, 0.293, 0.096] in the image\nAnd my action is Action: SELECT\nValue: Change Location..."}]}, {"id": "mind2web_4170", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_1e74a3a8-01f9-480c-a924-561348ab26d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.108, 0.266, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4171", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_0cc8cc75-1c79-42f9-a1d1-1af3cf84ff58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[button] Filter -> CLICK\n[button] Adults-Only -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.304, 0.469, 0.4, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4172", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_ca1921ca-fb66-4d9d-b1a6-7695452f3ce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.412, 0.328, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4173", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_188b0bed-cc38-40e9-8652-97811bb3b5e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK\n[searchbox] Enter ZIP or State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.214, 0.847, 0.238] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_4174", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_1fa019d8-0d92-44b0-803b-88881eac1293.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.251, 0.542, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4175", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_b53df9a0-55be-448b-ba60-f6d1fba1653c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: pet festival"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.166, 0.398, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4176", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_aa97ab13-302f-4371-b31f-a17cb1c4c0f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[generic] Sort by -> CLICK\n[option] Price: Low to high -> CLICK\n[div] See more chips. 
-> CLICK\n[img] Short Socks (2 Pairs) -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.497, 0.776, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4177", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_bc096211-84e0-4bc1-9823-94b5011a8780.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.383, 0.296, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4178", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_9b93d5af-3dd7-41fb-8252-19a7406bc245.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.372, 0.94, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4179", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_d025456f-7f08-48c2-bd5c-368b869e6a5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.276, 0.366, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4180", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_774ff5d1-0b71-489b-81f0-c0cc6ba9e6cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.459, 0.334, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4181", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e975344d-35a1-4268-8e2e-d15e4617cd26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[combobox] Flying to -> TYPE: NEW DELHI\n[option] Airport Indira Gandhi International Airport -> CLICK\n[button] Next Month -> CLICK\n[use] -> CLICK\n[div] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.345, 0.592, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4182", "image": {"bytes": "", "path": "./images/2cb6558b-3dd7-4acd-a495-a4d3b96369ad_54ae18ba-fa04-4295-a6b3-509266945442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of convertible cars for sale under 20000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.026, 0.666, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4183", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_8d138ab7-7a82-4a74-b799-e1e64d929f58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.942, 0.135, 0.977, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4184", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_9756bb38-2000-423d-b77a-30db19b21f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.71, 0.938, 0.812] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4185", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_392b753f-57be-49d0-bcc1-0b44af7ec1a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK\n[textbox] Pick-up location -> TYPE: O'hare Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.183, 0.284, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4186", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_5d6062eb-95c5-4098-85ff-ac3fd095c9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Toronto\n[button] Toronto, ON, CA (YYZ) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.497, 0.478, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4187", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_461b3ec5-fb73-4017-b2e2-07d17b336e0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK\n[checkbox] Free cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.465, 0.045, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4188", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_966ba3c5-09f3-4484-85b9-82df82f9af62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[div] 14 -> CLICK\n[button] Apply -> CLICK\n[span] From $73 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.314, 0.202, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4189", "image": {"bytes": "", "path": "./images/74226fab-6285-45da-8582-d25a876aa7b0_40391570-2276-4e50-b19e-5677a25066e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for the next pop concert.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.089, 0.282, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4190", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_f4121080-df90-4bbe-bf17-79c8b584ef9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.29, 0.715, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4191", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_d5e383de-e2e1-4615-a52c-c0e09c504d91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo\n[listitem] Central Park Zoo, East 64th Street, New York, NY, ... -> CLICK\n[searchbox] To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.191, 0.359, 0.209] in the image\nAnd my action is Action: TYPE\nValue: broadway"}]}, {"id": "mind2web_4192", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_69a737f7-e943-4d47-87e7-54c115520042.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[img] Blue -> CLICK\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.243, 0.691, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4193", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_f0c02b8e-48d9-4b91-aff4-829b7a9d82c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.464, 0.482, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4194", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_3f1a8555-f976-4d2d-a9cc-d53a972709bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.163, 0.734, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4195", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_d86ccff6-5a66-4228-8fd7-92644017347d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK\n[textbox] Upper Bound -> TYPE: 40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.573, 0.244, 0.594] in the image\nAnd my action is Action: TYPE\nValue: 0"}]}, {"id": "mind2web_4196", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_f9f59927-eae1-481b-9bbc-c3cca1b2fb0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK\n[button] Less than 7 days -> CLICK\n[span] Voyage Program, European Union Member States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.739, 0.606, 0.772] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4197", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_65dc9fff-ffa8-4c1c-abef-2e06d1af7b05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. 
Miami area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.39, 0.274, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4198", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99cd5f2e-d013-4b2e-864c-902cb13df909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK\n[textbox] Search by keyword -> TYPE: Taylor Swift"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.308, 0.983, 0.331] in the image\nAnd my action is Action: SELECT\nValue: 1 Months Ago"}]}, {"id": "mind2web_4199", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_4c33082e-6e43-478e-a153-e427d1b17fc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Selected Pick-Up Date 03/22/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/30/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.213, 0.837, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4200", "image": {"bytes": "", "path": "./images/edbac1c3-5409-48b0-a0ac-402a4900c59f_3be026ae-9dc4-4f3a-aefe-230af68e72dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the planned service alerts for the subway red line\nPrevious actions:\n[link] subway Subway Lines -> CLICK\n[span] Red Line -> CLICK\n[link] Alerts 8 alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.399, 0.295, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4201", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_31af69cc-8439-4cfd-8d2b-1a335a0e1e5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.255, 0.61, 0.277] in the image\nAnd my action is Action: SELECT\nValue: Train"}]}, {"id": "mind2web_4202", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_d1da3a0f-8824-4fcf-ba81-24debe082563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\n[link] BABY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.121, 0.727, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4203", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_76a9fd32-80ff-45ce-879a-aa7d959c9b62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.564, 0.247, 0.728, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4204", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_e3b4f654-b33f-429f-8327-feb0da8ca5e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.207, 0.503, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4205", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_c8af097d-59f1-4eae-ac13-da33e869dd8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.242, 0.291, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4206", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_90a2701a-0940-4f95-a93c-d61d8a2cecaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.303, 0.122, 0.492, 0.174] in the image\nAnd my action is Action: TYPE\nValue: las vegas"}]}, {"id": "mind2web_4207", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_92169b81-9840-4ae7-af42-73477badeb33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK\n[p] Backup Camera -> CLICK\n[combobox] Select Sort Order -> SELECT: Newest first (by car year)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.194, 0.617, 0.437] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4208", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_1b6cc686-4b86-41f5-9df2-290404ccfc53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.13, 0.789, 0.155] in the image\nAnd my action is Action: TYPE\nValue: Canyon de Chelly"}]}, {"id": "mind2web_4209", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_656ed1ca-1b96-4f94-9342-a57e054a3cdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Itinerary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.454, 0.424, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4210", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_8b0ee93f-8fc6-4664-930a-4f58525661ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.093, 0.432, 0.115] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_4211", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_a0c205cb-1a8c-44e3-af38-ff210c95571e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.121, 0.677, 0.141] in the image\nAnd my action is Action: TYPE\nValue: Prometheus"}]}, {"id": "mind2web_4212", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_05a173a8-2088-4da9-97e3-f8988811b5b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.549, 0.501, 0.58] in the image\nAnd my action is Action: SELECT\nValue: New York"}]}, {"id": "mind2web_4213", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_898bc6da-8851-414f-9e66-eacba595b118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.463, 0.956, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4214", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_35dd7c87-f653-4de6-a617-d3503d0bdc73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.321, 0.688, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4215", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_94ea2d0e-1ab8-4d5f-bdd8-a9735cfc485e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.464, 0.266, 0.7] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4216", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_5612cccb-4e39-483a-8b5b-9d4f9261f5b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago\n[a] CHI - Chicago, IL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.372, 0.62, 0.391] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_4217", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_3253cf50-b912-4446-9ca0-00ff7813f42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.122, 0.984, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4218", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_45f14dab-05eb-4113-a9ba-6dd6aad8acd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.251, 0.277, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4219", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_6f204b33-e51a-4ce0-ae28-d22278162aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.094, 0.923, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4220", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_a82c1270-924d-4758-86cd-30ba60260eb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK\n[img] SUV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.128, 0.23, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4221", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_d4f1c897-c5aa-4cb7-afa2-3ddd845df114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.799, 0.787, 0.82] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_4222", "image": {"bytes": "", "path": "./images/84f19aba-ad0a-46db-84bb-c279b5353b8a_fe69325d-0689-46ae-b411-dbb199d259aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cafes that have outdoor seating and is dog friendly.\nPrevious actions:\n[textbox] Find -> TYPE: cafe\n[span] Cafe -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.301, 0.066, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4223", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_23f591fd-977a-437a-931d-4be0a372db4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.467, 0.5, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4224", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_92c31707-5c0d-450e-a9bd-b0290f28f907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.018, 0.598, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4225", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7e70b73f-5b14-457e-b4cc-532742d72dcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.164, 0.045, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4226", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_0bc81932-85d7-467f-8ef7-294f55118587.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[button] 18 -> CLICK\n[combobox] Time -> SELECT: 5:00 PM\n[svg] -> CLICK\n[span] 2 guests -> CLICK\n[combobox] Size -> SELECT: 1 guest"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.691, 0.525, 0.732] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4227", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_8a1d16e9-9527-42fd-9a08-6a0e9d39c051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.315, 0.033, 0.44, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4228", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_9b7e228b-1dc4-478c-9137-946f5ef3034c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.183, 0.966, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4229", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_9294ad81-1046-46ae-a950-85a3a34e1b77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.556, 0.184, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4230", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_cb9d3a61-b898-4330-8767-fb2a56c37b64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER\n[link] Action Figures -> CLICK\n[img] Hasbro -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.915, 0.274, 0.969, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4231", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_72174279-0b65-4da3-8ed8-69a5f4bd03cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.011, 0.183, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4232", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_31ddff52-15df-4d0d-916c-18e1fb240ea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: washington\n[span] Washington County Regional Apo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.302, 0.905, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4233", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_d194ee95-87f0-4ad2-a6ce-d06cf89fec9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.117, 0.78, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4234", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_4672b855-b26c-4b81-9010-18d6ec210c9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.157, 0.715, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4235", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_64c3c386-4170-4ca4-a34b-5e3c589da638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles & Hardware -> CLICK\n[link] PlayStation 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.033, 0.378, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4236", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_9331a1b5-54f8-4de6-acd5-dd60c9a19d53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 10013\n[button] Search nearest REI stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.141, 0.318, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4237", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_2e6cf0b7-800a-42f3-af4a-d2f3c85a6bf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK\n[div] Fri., Apr. 21 -> CLICK\n[button] April 17, 2023. 
-> CLICK\n[combobox] Guests -> SELECT: 2 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.313, 0.119, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4238", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_9e8314a0-1e1e-4db1-9e85-422dfa0bb165.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.091, 0.343, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4239", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_a686b3cc-e59b-43b8-bb1e-22e1bef857da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.229, 0.391, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4240", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_3dd8bf82-b783-4ede-b42a-0b632c8cb365.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Women -> HOVER\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.213, 0.877, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4241", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_a5338369-622d-496d-b922-ce73b9e1b5df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.104, 0.494, 0.124] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_4242", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_f49b8bab-88cc-4b73-a5a1-d63b597c4b0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] COLUMBUS, OH -> CLICK\n[path] -> CLICK\n[textbox] Zip Code -> TYPE: 60538"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.163, 0.335, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4243", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_3107856a-910f-40db-a9d0-abe314f18545.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.323, 0.705, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4244", "image": {"bytes": "", "path": "./images/4c623130-32db-4cc8-928a-68b0bc816efc_ab0c775c-0a0a-4b43-85ea-e348652a38da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find artist Mark Knight from London and follow him and add his most liked track to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Mark Knight\n[button] Search -> CLICK\n[link] Mark Knight -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.773, 0.302, 0.838, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4245", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_12e7f7fd-5ca7-42df-8962-903e1a49e4fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.035, 0.775, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4246", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_f783514e-6e62-4f91-bf42-00e2edb90295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.051, 0.181, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4247", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_0a904ba2-dca6-4ad1-8af2-f6568ea6d8eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.174, 0.641, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4248", "image": {"bytes": "", "path": "./images/9c9e89c1-fdb9-424c-b544-b9fd2f1ef46e_5968648f-ed49-45e3-903a-229b30081048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Limited Time Offers available in the Kids section and filter by size, 11-12Y(150).\nPrevious actions:\n[link] KIDS -> CLICK\n[link] Limited-Time Offers -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.3, 0.165, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4249", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_3f694618-f0a3-47d5-98c5-024c53562900.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\n[searchbox] Search Site -> TYPE: 600w power supply"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.011, 0.546, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4250", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_fb3aafe7-6077-4b37-9cfc-65b1e614cdea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.149, 0.782, 0.177] in the image\nAnd my action is Action: TYPE\nValue: adele"}]}, {"id": "mind2web_4251", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_ffd3f2e7-1bd0-4bd4-b057-e7b05e92d656.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK\n[link] Adventure -> CLICK\n[generic] TOP RATED -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.732, 0.632, 0.748] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4252", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_3abb0c61-a81b-4dc7-ac70-d88eb176a529.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] 05:30AM -> CLICK\n[button] SEARCH -> CLICK\n[span] Distance -> CLICK\n[link] Price -> CLICK\n[button] Book now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.11, 0.622, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4253", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_9c7ed1f9-73cf-40b2-807e-d0e4aa0cd853.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.326, 0.158, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4254", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_61a9ea08-e8d9-4584-affd-51e292f094e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.211, 0.721, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4255", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_1b19b03c-7423-46e6-abea-ab1bb37a520b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.11, 0.129, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4256", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_2f874e77-6f98-43bf-a476-bb69382c7197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK\n[heading] Weddings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.48, 0.943, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4257", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_0cd3a314-e9db-447c-ac17-e07b23307fca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.202, 0.792, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4258", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_d8a64f26-d3fa-47cc-a614-7ac555797a95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[div] 1 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.204, 0.927, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4259", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_d0fa9c16-fbc1-4ab6-bdc5-13758977249b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[button] Add to Wish List -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.925, 0.17, 0.947, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4260", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_753df684-6110-40eb-88b5-aae9df30ed15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.452, 0.308, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4261", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_db4941b4-a391-4fff-911c-fd74052dfd3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.054, 0.753, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4262", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_d153d7e1-bde1-467f-a08a-77052c38a054.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.547, 0.062, 0.559] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4263", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_b6481fae-c97a-4af4-a416-ccd071c8cdc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.004, 0.042, 0.041, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4264", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_a1d7fbd9-6bde-48f1-aa51-307d0fcac7a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Polos -> CLICK\n[div] Size -> CLICK\n[link] M -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.192, 0.947, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4265", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_e7072a8a-6a01-4c81-a4f3-9a80483927f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.706, 0.501, 0.711] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4266", "image": {"bytes": "", "path": "./images/fdcac1e8-1dd0-4846-b2cf-16d916ea9e17_42ebb272-99de-4d4e-b103-5062fbbb61ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for tickets for los angeles kings this weekend\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: los angeles kings\n[option] Los Angeles Kings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.31, 0.65, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4267", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_f95e947b-7409-4178-aa26-31b38f194d40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.37, 0.34, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4268", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_3de56dba-079d-48d7-ae00-1612cbd66ca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.054, 0.387, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4269", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_f2e91900-b7dc-48c1-86c8-0add81412717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.169, 0.908, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4270", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_b36d2bf4-1700-4742-a245-e46c85973a6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK\n[checkbox] Wine tasting -> CLICK\n[button] Update search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.419, 0.141, 0.452] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4271", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_e11c9f8b-0365-4908-b2cd-c64e9f0e9b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[gridcell] Thu, Jun 1, 2023 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.3, 0.959, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4272", "image": {"bytes": "", "path": "./images/2159d768-6657-40af-b336-ad5726fec1e2_37493ad9-bdcf-4e45-a8bc-de60d4dc7fde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my wishlist the top rated JRPG game.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.453, 0.06, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4273", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_6197c144-0a0c-4e0d-abcf-1b380989feed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.063, 0.153, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4274", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_3cf4529f-653a-47d7-9d84-f577dd79329f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[button] Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.008, 0.988, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4275", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_7d20bcbf-3186-404e-bea7-f553986347de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith\n[select] Confirmation Code -> SELECT: Ticket Number"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.367, 0.562, 0.402] in the image\nAnd my action is Action: TYPE\nValue: 123456780"}]}, {"id": "mind2web_4276", "image": {"bytes": "", "path": "./images/117c1176-b5bd-4b9a-9be2-80a7f390e207_96808158-b0a9-46ad-bddf-d79b9823a094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the US box office revenue for the highest tomatometer rated movie that the actress playing Sam Carpenter in the most recent Scream movie has been in.\nPrevious actions:\n[textbox] Search -> TYPE: scream\n[p] Neve Campbell, Courteney Cox, David Arquette -> CLICK\n[link] Melissa Barrera -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.769, 0.388, 0.783] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4277", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_23f74267-67c7-467c-a379-2b044cca97f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER\n[span] Colosseum -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.279, 0.247, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4278", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_d0e650ae-54ec-4146-9ae4-b3380b3d6c02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.016, 0.542, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4279", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_ef74239b-9f8a-4f92-aeba-6ba4ef836c53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.096, 0.488, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4280", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_5a217967-5bc9-47a5-8827-7b36d5e4c9ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK\n[input] -> TYPE: hawaii"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.048, 0.788, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4281", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_fe6c0a7d-c18c-4084-b223-e178afccd592.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.183, 0.635, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4282", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_d816bb8a-ffc1-44f5-b1d4-87e9d0c46851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.151, 0.964, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4283", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_d9c94040-4de0-473a-a1c5-6a909a5c5319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Premium -> CLICK\n[label] Distance -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.599, 0.632, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4284", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_aff3dec2-7e47-483f-b156-9ff640444b30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.028, 0.45, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4285", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_7030a4e7-b607-4f7e-92e4-8ca04044bd8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4286", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_48077a71-0bd3-484d-8f19-596861e7e8fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.007, 0.441, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4287", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_1f17b922-6a57-438e-84cd-bb7a5d08ddcc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.093, 0.861, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4288", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_015ff302-508f-4b98-934e-31687f84b870.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\n[link] BUY TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.916, 0.209, 0.926, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4289", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_d7ddf3e3-19d0-496a-b683-73230cc1be3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.136, 0.543, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4290", "image": {"bytes": "", "path": "./images/fb73611b-dc68-4a75-bf5b-7e151dc151af_f20c0850-fa76-4979-b946-e7e48831e68a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get tickets for the Buckeyes football game on April 15th for a group of 5 people.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.766, 0.533, 0.857, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4291", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_4b61c893-4f21-4ecb-8f84-7ce763a40e70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK\n[link] Confirm Membership to Checkout -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.344, 0.38, 0.375] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_4292", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_18dba795-9a54-4dfb-bb8d-b3b849528278.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.113, 0.621, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4293", "image": {"bytes": "", "path": "./images/40fbda9d-22c5-4aab-9798-3db50d981c5c_a651f53a-5897-49f2-b132-6e19082d77c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to answer a question in the home improvement section.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.0, 0.288, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4294", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_75d85232-55ad-4c33-8fa6-f604a65b9a08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.389, 0.397, 0.402] in the image\nAnd my action is Action: SELECT\nValue: Sort by Distance"}]}, {"id": "mind2web_4295", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_dc3b0597-0b0b-44a3-922a-df69bcb0df24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.142, 0.97, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4296", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_21008ae4-2f47-4263-93cf-a947f4c43b2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali\n[generic] Indonesia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.292, 0.584, 0.3] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4297", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_32691a48-f07e-4724-8c49-ba80367012ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.009, 0.867, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4298", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_96d386cb-2247-46a8-8589-52d65dd3f735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.275, 0.233, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4299", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_1efe7992-b578-46ea-bbc0-6720f221b9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.102, 0.313, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4300", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_4836d82b-9f3b-4116-b7a9-0130346c4835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Change Location -> CLICK\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.16, 0.249, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4301", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_d5d0ca15-7313-4bb4-8d8d-0bf611109aef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.396, 0.452, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4302", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_7b310218-134f-4b1d-aff8-4c79ffd81728.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[combobox] Minimize my -> SELECT: Walking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.392, 0.848, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4303", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_ba296304-ef11-42fd-a39e-3d7c465a811a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.315, 0.238, 0.338] in the image\nAnd my action is Action: TYPE\nValue: 4847"}]}, {"id": "mind2web_4304", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_3944fa19-d153-448f-82e6-3c32ea641127.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.218, 0.997, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4305", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_614dad39-55ae-45d7-8e8b-f51b7daa07fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.058, 0.137, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4306", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_520c72ed-8115-475e-8bcf-6f01cc2526f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.616, 0.846, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4307", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_594751d7-d499-4286-ba1a-207ebba0d47a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.874, 0.209, 0.93, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4308", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_bcf1c6bd-f9df-41a2-b31f-2547f79a5ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Caribbean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.318, 0.871, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4309", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_e47cbe0a-a59a-4622-8016-d9d8f32cf08e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.026, 0.93, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4310", "image": {"bytes": "", "path": "./images/96c35c7a-a0d6-42c3-9814-eb2698c802a4_2386ece1-b158-438a-ac9b-aad2f882a746.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the news page with the UFC schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.076, 0.679, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4311", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_16d6591a-59b8-4700-9681-4750058e8157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.104, 0.374, 0.115] in the image\nAnd my action is Action: TYPE\nValue: faro"}]}, {"id": "mind2web_4312", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_e0888309-3a6f-4b59-9c24-1eda62b45b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI\n[option] Airport Chhatrapati Shivaji Maharaj International ... -> CLICK\n[combobox] Flying to -> TYPE: NEW DELHI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.305, 0.792, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4313", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_0c9ca57e-f570-49cd-bcd3-cafce120d060.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\n[link] Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.199, 0.325, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4314", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_5c14ea08-04a7-4e9d-b602-4c6a24be2182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.167, 0.082, 0.278, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4315", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_8ffbcf91-77e2-469f-be1c-a9fc64ea6f62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK\n[select] 1 -> SELECT: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.347, 0.791, 0.381] in the image\nAnd my action is Action: SELECT\nValue: 37"}]}, {"id": "mind2web_4316", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_7393d933-1951-4632-880e-50e665f52a82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla\n[textbox] Zip -> TYPE: 10019\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.165, 0.429, 0.182] in the image\nAnd my action is Action: SELECT\nValue: Lowest price first"}]}, {"id": "mind2web_4317", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_182ccf4f-51ff-45df-badd-9fddd96a70bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.449, 0.587, 0.49, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4318", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_a2a1777d-0072-4151-ba64-a138c5158bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] To -> TYPE: broadway\n[listitem] Broadway Theatre, Broadway, New York, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.204, 0.359, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4319", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_cc9cb352-2aea-4969-a19f-e40d05fad832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.617, 0.054, 0.693, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4320", "image": {"bytes": "", "path": "./images/2ce0a80e-a049-434d-8b99-8343d2a0b21d_6592e896-575d-4583-9c89-7cb0a9a099c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the location of the Altavista bus stop.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.397, 0.149, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4321", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_306b6b4a-ad42-4cea-8a57-5f9e54bd1f04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.313, 0.045, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4322", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_a8e10c19-7b5f-40c7-9779-b0a96c1e1733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK\n[heading] to next step -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.41, 0.629, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4323", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_bffe389f-8f10-4e39-870d-51c2d169992c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[textbox] Zip* -> TYPE: 59316\n[combobox] distance -> SELECT: 500 mi\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.225, 0.429, 0.264] in the image\nAnd my action is Action: SELECT\nValue: Lowest mileage first"}]}, {"id": "mind2web_4324", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_a0f039d4-ca5d-4ad5-aec8-3e2db31d10b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.076, 0.713, 0.095] in the image\nAnd my action is Action: TYPE\nValue: busch stadium"}]}, {"id": "mind2web_4325", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_f6fd8a06-3181-48db-a79e-4bae343e1ee1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.16, 0.292, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4326", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_5606d646-3bbc-46a1-abec-deb28c34e776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach\n[em] Venice -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.286, 0.379, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4327", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_c26ad15c-1ac6-4940-b5e5-4b16ad0d23e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April 2nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.333, 0.277, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4328", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_4d3a86ff-5ae7-4d7a-b585-f7555dd04d67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.079, 0.749, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4329", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_f6a205a1-171b-4d72-ba1e-49aeaff0f3c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.304, 0.222, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4330", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_c4c50fd3-c84e-455d-84e8-0276aa7a9aab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK\n[button] APPLY -> CLICK\n[span] $200 - $300 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.508, 0.192, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4331", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_db42143b-781e-4365-b5ee-5c02269ede06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK\n[button] Apply Filters -> CLICK\n[checkbox] Passenger with Disability or Assistance Needed? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.142, 0.957, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4332", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_222a5bfe-e904-480f-8a24-10d338acdc22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK\n[combobox] Search by ZIP code, city, or state -> TYPE: 43215\n[span] Columbus, Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.402, 0.302, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4333", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_de127ffa-a357-431b-9f94-8ad89dfbe7c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK\n[link] S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.573, 0.073, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4334", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_2bc7cc9b-a4f6-477c-a8e6-91f2bd06d27b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. 
The trip should arrive at 9:45 AM.\nPrevious actions:\n[listitem] Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: staten island\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.334, 0.348, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4335", "image": {"bytes": "", "path": "./images/4faf56f2-217b-4d2d-b510-a24e30e6b20e_5fbd504c-aa43-420a-9f09-73ebbb6b7e0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of all movies that are being released in May 2023\nPrevious actions:\n[button] Reviews -> CLICK\n[button] HOME -> CLICK\n[button] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.428, 0.216, 0.447] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4336", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_a28d7d39-b925-4d55-b0e6-ab865d7409ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.09, 0.697, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4337", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_b39e9045-b25c-47a9-afce-478fbf734715.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.058, 0.932, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4338", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_bdea650c-4e2f-49e6-85c2-22989794fba9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (18) -> CLICK\n[button] Show 18 Results -> CLICK\n[combobox] Start Time -> SELECT: 10:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.322, 0.3, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4339", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_a5259ece-0829-477f-b30d-f47c1f508515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.852, 0.141, 0.859] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4340", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_ba0752ba-76d5-439e-baaa-e9f077356cc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.257, 0.194, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4341", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_a63671f9-def0-48d6-bcac-289e2360a5c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.187, 0.682, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4342", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_d8db2dc7-0796-421d-86ab-314c2f1ea86e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.153, 0.392, 0.169] in the image\nAnd my action is Action: TYPE\nValue: India"}]}, {"id": "mind2web_4343", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_db292549-3e50-409d-9242-d3fed37a72d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[label] XXS -> CLICK\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.586, 0.784, 0.627] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4344", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_ac051332-2898-4bc0-96ac-0c7c39c53824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.219, 0.13, 0.238] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_4345", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_6e31d60d-78ec-444d-b5b2-09cddb6700d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.412, 0.234, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4346", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_713c45db-226a-41c6-adb6-c348424b9e20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK\n[input] -> TYPE: los angeles\n[option] Los Angeles, CA - Union Station (LAX) -> CLICK\n[textbox] Type a date, or use enter to open, escape to close... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.178, 0.58, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4347", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_aeacd270-9832-4504-8b6b-2767cc583100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[combobox] Pickup time Selected 10:00 a.m. -> CLICK\n[option] 8:00 a.m. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.211, 0.481, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4348", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_0739722b-b2ed-44ee-9d7b-4442f4e241b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.243, 0.914, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4349", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_6e91dd35-e5c5-4066-9548-60e357a91b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BERLIN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.071, 0.326, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4350", "image": {"bytes": "", "path": "./images/17fffbee-e41b-46e4-ab75-675b263ca7b7_a58f44bd-baa3-4a90-8fca-7abece0f83bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest power supply unit with at least 600W power output.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.014, 0.39, 0.043] in the image\nAnd my action is Action: TYPE\nValue: 600w power supply"}]}, {"id": "mind2web_4351", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_897e782f-f661-462f-9b43-bfe25ae73ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.135, 0.35, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4352", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_2b13428f-ca4c-4db2-bec9-35b0966a4e75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.08, 0.199, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4353", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_8c63e1f7-d7ba-4b22-97a1-c688e34f5959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[tab] SkyMiles -> CLICK\n[link] SkyMiles Award Deals -> CLICK\n[combobox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.721, 0.334, 0.75] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4354", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_cb8feb86-5c07-4ad6-bcb4-9e4bd08ee0ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.534, 0.236, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4355", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_ea68d691-915a-45f6-b4ce-fc194d1a5207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK\n[generic] Run Search -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.116, 0.478, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4356", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_a515f870-4760-4d67-b2bf-1d756fe18960.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK\n[link] Price (lowest first) -> CLICK\n[button] Add Filter -> CLICK\n[link] Ship Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.163, 0.491, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4357", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_7d2f0f0a-fb03-4063-a5e9-5f047e6285fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.374, 0.48, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4358", "image": {"bytes": "", "path": "./images/e031c695-28e2-4507-949e-bbb65edf9f3d_56319ea6-1d3f-4ea2-8ab7-5a064a3d4502.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an article about a new restaurant and share it on Twitter\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.148, 0.988, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4359", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_e1f18ee3-1577-44fb-a283-1be215e5ae52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: June 2023\n[link] 6 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.205, 0.891, 0.233] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_4360", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_fe20b467-a94f-48d5-a52e-dac99270a61f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... 
-> CLICK\n[link] Search open jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.25, 0.582, 0.267] in the image\nAnd my action is Action: TYPE\nValue: TX"}]}, {"id": "mind2web_4361", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_8fdcf5a9-ea15-4bc4-961f-bf32820c84c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverley to Glasgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.064, 0.327, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4362", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_418c5ba7-fa3b-477d-a6f9-939e21fd0c7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessories.\nPrevious actions:\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK\n[span] No trade-in -> CLICK\n[span] Buy -> CLICK\n[span] No AppleCare+ -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.546, 0.938, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4363", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_3752eb02-5cf2-4205-b0b7-3ffe26b7d0be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Start Journey -> SELECT: Train\n[combobox] End Journey -> SELECT: Bus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.368, 0.848, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4364", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_66435b68-2782-4173-be98-4b9456a69591.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK\n[checkbox] $5.99/Day$1.38/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.69, 0.777, 0.725] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4365", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_8dfab805-e808-4b48-a914-5fd5765be1aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... 
-> CLICK\n[textbox] CHECK IN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.245, 0.892, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4366", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_bc4fec9c-046b-407c-ab83-4c635522ec54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM\n[combobox] Return Time -> SELECT: 9:00 AM\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.163, 0.493, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4367", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_bb6f1a40-1fd1-4508-973c-5492eac6636a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK\n[div] Downtown Bangkok -> CLICK\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 24 May 2023 -> CLICK\n[checkbox] 28 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.55, 0.129, 0.637, 0.167] in the image\nAnd my action is Action: SELECT\nValue: 9 30 AM"}]}, {"id": "mind2web_4368", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_717f5404-ab6d-4271-b550-e620c34e6c75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[textbox] Search by Make, Model, Body Style or Keyword -> TYPE: Mustang"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.102, 0.85, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4369", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_5b94aca3-e2e8-4f77-8fdb-1ba0de275494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.869, 0.0, 0.945, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4370", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_b0beeee0-4e2a-477e-8e63-b6195edd64f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.145, 0.631, 0.178] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_4371", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_5bd0bd84-5701-40c3-88eb-20c1cf1c37c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.492, 0.409, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4372", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_da522562-f672-4425-bfb0-d6afe495664d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.542, 0.32, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4373", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_f09e8313-161d-4b4b-90ba-c795643614be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.006, 0.67, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4374", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_57599408-9c94-4845-b966-d78e7c2fdd24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid\n[link] Real Madrid LaLiga -> CLICK\n[link] Fixtures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.346, 0.138, 0.364] in the image\nAnd my action is Action: SELECT\nValue: UEFA Champions League"}]}, {"id": "mind2web_4375", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_e23c889f-caa3-430f-87f1-00c0ed71a29e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK\n[span] Luggage -> CLICK\n[span] Carry-on Luggage -> CLICK\n[img] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.383, 0.988, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4376", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_1b93049b-6898-492f-b0a7-fe1adb3bcd9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.072, 0.513, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4377", "image": {"bytes": "", "path": "./images/718ccfb6-687e-4260-ad6e-9fa3942abec5_60a5012f-5b05-4bd5-b6f1-9a6932903e03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the ghost frame to your avatar using steam points.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.0, 0.334, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4378", "image": {"bytes": "", "path": "./images/2bce1096-f573-4752-94a9-e139ce37eb27_c8a5297b-22ae-40b2-9e2e-b4950bd670f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of off-Broadway theatre shows on April 19\nPrevious actions:\n[button] All dates -> CLICK\n[textbox] Select Date Range -> CLICK\n[gridcell] 19 -> CLICK\n[input] -> CLICK\n[gridcell] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.16, 0.426, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4379", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_a68e9d26-9737-4b6b-853d-5f3675ce82d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.244, 0.766, 0.276] in the image\nAnd my action is Action: TYPE\nValue: X123456789"}]}, {"id": "mind2web_4380", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_a4df195b-f418-472c-be2f-9883758c1acb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER\n[link] Los Angeles Lakers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.155, 0.158, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4381", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_d2348c91-b246-41b8-8215-b8ab7894ba2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.712, 0.19, 0.771, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4382", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_76999e4d-1134-413a-8def-ee37b4d1c84d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.153, 0.364, 0.174] in the image\nAnd my action is Action: SELECT\nValue: A6"}]}, {"id": "mind2web_4383", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_73b6d459-3322-42df-b999-02a0b249731d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.218, 0.615, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4384", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_fb91be3b-ec6b-40ee-8ca0-c9a87489b5cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK\n[input] -> TYPE: new york\n[link] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.009, 0.867, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4385", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b546074a-9ac5-4304-9d50-cb5dbc2fb3da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[span] -> CLICK\n[button] Next -> CLICK\n[div] License Plate -> CLICK\n[textbox] License Plate -> TYPE: AZXA46\n[combobox] State -> SELECT: AZ"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.388, 0.618, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4386", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_eef408ce-d3d3-416e-8a22-75d2730b5cdf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER\n[link] PS5 Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.193, 0.467, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4387", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_50e29032-1d28-41a0-9348-754e15b4cfa9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... 
-> TYPE: Lebron James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.061, 0.447, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4388", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_22568b7b-dd05-491b-880f-c1c3e5df037a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.314, 0.728, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4389", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_825f81a5-2ff7-4beb-8ba3-c99fe1d14250.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: 44012"}]}, {"id": "mind2web_4390", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_78355c99-0145-4534-9eeb-48c77afa1487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.154, 0.358, 0.174] in the image\nAnd my action is Action: SELECT\nValue: Tesla"}]}, {"id": "mind2web_4391", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_76836f77-682c-4d0d-a708-0e890ec81eb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.176, 0.938, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4392", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_38ed349f-c786-4ede-ad54-2636970b733e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK\n[link] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.279, 0.181, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4393", "image": {"bytes": "", "path": "./images/63529dc2-7b13-493e-9a0c-b1ce50256a16_b6225a8b-0610-400b-859b-f9e1b5e4fe1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the flight status for the flight 12345678 leaving on april 7\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.234, 0.322, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4394", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_5ca31bb4-3862-4dba-a5e2-25444ff45cf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.104, 0.206, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4395", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_8ba30792-56ba-4381-9106-0a693cd4b83f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.362, 0.239, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4396", "image": {"bytes": "", "path": "./images/fc81025d-f5a3-4b68-9551-e84175b87a63_3bc606e8-219f-40a1-ac23-7465abf97b1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the video highlights of the most recent Super Bowl.\nPrevious actions:\n[svg] -> CLICK\n[link] Super Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.104, 0.981, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4397", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_0fa434b2-302e-4839-bd13-a2426c8a7367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.082, 0.573, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4398", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_2dd318cd-167f-4a33-9395-981c43cd92ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.152, 0.192, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4399", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_493e29f6-6afc-495a-a79d-e419581db53b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.117, 0.661, 0.126] in the image\nAnd my action is Action: TYPE\nValue: Dota 2"}]}, {"id": "mind2web_4400", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_11a73dfc-fd0b-4135-94dc-02552e25ead2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[link] More -> HOVER\n[span] Phone Repair -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.015, 0.564, 0.024] in the image\nAnd my action is Action: TYPE\nValue: houston"}]}, {"id": "mind2web_4401", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_cd121173-fad8-49c8-9a0e-05fb88fc82f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.757, 0.804, 0.835] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4402", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_64d2cf14-ff84-4d7e-8dfa-fa0fd7eb2bec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[button] Maytag in Electric Dryers -> CLICK\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.746, 0.059, 0.759] in the image\nAnd my action is Action: TYPE\nValue: 0"}]}, {"id": "mind2web_4403", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_f9c51916-2ac2-4cd4-b949-bce0411788a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Continue -> CLICK\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.329, 0.514, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4404", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f99aa013-809e-4fb4-8fd9-80ca0220ca54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[tab] BOOK -> CLICK\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.251, 0.324, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4405", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_38f65ad6-587d-45ea-9b87-d3c973ca9acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.386, 0.512, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4406", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_f135e797-d76a-4008-a5d1-7de7cb7b4e15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[textbox] Sun Mar 26 \uf073 -> CLICK\n[link] 27 -> CLICK\n[combobox] \uf0d7 -> SELECT: 1 Room\n[combobox] \uf0d7 -> SELECT: 2 Adults\n[combobox] \uf0d7 -> SELECT: 1 Child"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.466, 0.201, 0.529, 0.222] in the image\nAnd my action is Action: SELECT\nValue: 0"}]}, {"id": "mind2web_4407", "image": {"bytes": "", "path": "./images/4985e844-15bc-4fa4-9a76-f2c6fb1e6c16_ee3e4de5-183f-4367-aab7-af3f83f6191c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find me the cheapest red Model Y available to register in 94043\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.054, 0.454, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4408", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_444f21bd-d835-4249-980c-92b55df4b4c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[button] Careers -> CLICK\n[link] View All Jobs -> CLICK\n[textbox] Search by Keyword -> TYPE: developer\n[textbox] Search by Location -> TYPE: dallas\n[button] Search Jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.263, 0.295, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4409", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_164ac1a3-6c06-47a9-93f8-0dd205f683dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.336, 0.384, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4410", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_6aa1c326-0259-434d-98e9-cb78d7e25950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.019, 0.183, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4411", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_90113f80-02c9-4c12-ad3c-a2324f74842b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.511, 0.846, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4412", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_dc6589d2-8730-4e3b-8b8a-905044c2167f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\n[link] subway Subway Lines -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.192, 0.339, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4413", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_3653852b-16f2-4d36-8496-d814ef3f9c56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.239, 0.937, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4414", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_b48151cc-387f-4ee5-828f-77dd0d8b0209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK\n[checkbox] Jeep \ue066 -> CLICK\n[checkbox] Toyota \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.399, 0.408, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4415", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_f973e7a4-81f8-4019-8c27-0eed5a0883fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.309, 0.923, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4416", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_672f3990-9c90-4b51-9727-a2ebfd6c5ffe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.134, 0.271, 0.151] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_4417", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_bf4f08dd-0a44-4f6e-abc7-5d2272eb0b50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.112, 0.277, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4418", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_4c0199ea-38cf-4516-9beb-08e2fcf2e5c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Medical -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.3, 0.196, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4419", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_1f130b1f-91a7-4219-b5fa-a656b6fdba86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.303, 0.359, 0.333] in the image\nAnd my action is Action: SELECT\nValue: AM"}]}, {"id": "mind2web_4420", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_ff1e132b-8f0d-41a4-a915-ae5332d7612e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.14, 0.48, 0.86, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4421", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_3eec5eec-1aed-40dd-bc93-8742767cf94d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: Lubbock, Texas\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.143, 0.515, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4422", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_05857621-902c-41b0-b42e-96bb7a9958bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.012, 0.05, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4423", "image": {"bytes": "", "path": "./images/099a9da4-c8db-4900-ada2-76600f3655a4_d6e40d10-518e-4a8e-95a4-ff8756b67c8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of baby products that are on sale and under 10 dollars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.006, 0.371, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4424", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_63bb5158-324b-4f1e-9cf1-226e776b0641.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.148, 0.438, 0.16] in the image\nAnd my action is Action: TYPE\nValue: BANGKOK"}]}, {"id": "mind2web_4425", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_e41f8a52-5ce6-4d8f-8baa-3f605832a080.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.211, 0.514, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4426", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_9cbe4f75-eb82-4ae9-a013-d12eea58f7a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Swimwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.491, 0.509, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4427", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_59acd30d-a2a0-4546-86d2-667a574ec341.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[textbox] To , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.266, 0.492, 0.284] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_4428", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_f12823bb-cb42-43f8-b311-6ec6b90b82bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.524, 0.76, 0.539] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4429", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_44a3d5ab-4a1d-4104-b3a5-67f097ea5778.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[span] -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.149, 0.93, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4430", "image": {"bytes": "", "path": "./images/30e310ca-af64-46b4-a0f6-14b8f04fa734_0eb8808f-9c59-4b77-ae36-5cdab2faa0dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up Popular Photos in the Community featuring Aegean Airlines\nPrevious actions:\n[span] Community -> CLICK\n[link] Popular Photos -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.227, 0.269, 0.242] in the image\nAnd my action is Action: SELECT\nValue: Aegean Airlines \"Aegean\" (AEE) (452)"}]}, {"id": "mind2web_4431", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_763e2e66-b9b4-4a26-87bc-e6969bb9fb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.188, 0.588, 0.215] in the image\nAnd my action is Action: TYPE\nValue: COOPER"}]}, {"id": "mind2web_4432", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_b7e5976b-55cf-4461-a63b-e6cb9a069717.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.064, 0.47, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4433", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_9f25fc17-b721-4977-a0dc-11a07f97ed5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK\n[span] No Apple Pencil -> CLICK\n[span] No USB-C to Apple Pencil Adapter -> CLICK\n[span] No keyboard -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.826, 0.938, 0.863] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4434", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_bf67284f-ff0d-423c-ac34-ec7359de7867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.253, 0.77, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4435", "image": {"bytes": "", "path": "./images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_55b748c3-742e-4732-89a5-6966da49d829.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if there are tickets availabe for the Hamilton musical in Richmond, VA.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: Hamilton"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.267, 0.408, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4436", "image": {"bytes": "", "path": "./images/f17fa68b-27dd-4b9c-bc11-7a5dbe206741_ea452bf8-dd86-41d7-91fd-c461362e9c16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Premium Cars available to rent at LondonHeathrow Airport.\nPrevious actions:\n[label] Pick-up location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.124, 0.429, 0.137] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_4437", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_90eaeb46-910e-4fd2-8657-16cef6654d28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.223, 0.759, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4438", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_1e0238fc-8df5-4b99-bf1b-f1c1e7c88e26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[link] One Day Add-Ons -> CLICK\n[link] Buy Now -> CLICK\n[menuitem] Meal Deals -> CLICK\n[menuitem] All Season Dining -> CLICK\n[img] One Meal Season Dining Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.254, 0.781, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4439", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_99d8f741-6ca7-4310-9914-0a821e9e9e48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[textbox] Destination -> TYPE: Boston\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.763, 0.563, 0.797] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4440", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_91682497-693e-4ef4-8f75-fc5329114dbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK\n[textbox] Search IMDb -> TYPE: Prometheus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.034, 0.657, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4441", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_5d7ae1da-80a4-41de-a2e0-8088ad791b79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.448, 0.027, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4442", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_f2be8876-e549-4589-b545-6cfe1baed3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK\n[searchbox] Address, station, landmark -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.179, 0.335, 0.191] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_4443", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_823c594c-a988-4977-9651-e7eef65e4f8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.113, 0.499, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4444", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_16766ce2-4e9e-4955-80fd-4578cec08085.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.008, 0.984, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4445", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_44247c08-488a-41ec-ac6c-8eafb6ef3703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.263, 0.249, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4446", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_5df94a66-8e54-4048-adb6-54fe66727e42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.075, 0.429, 0.113] in the image\nAnd my action is Action: TYPE\nValue: orlando"}]}, {"id": "mind2web_4447", "image": {"bytes": "", "path": "./images/99b9a850-d5e3-4ecc-b1eb-99a5c5029918_97b059c4-5e26-47d1-ad8f-575d1ce528c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all used Tesla cars for 10017 zip code.\nPrevious actions:\n[combobox] Select Make -> SELECT: Tesla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.146, 0.599, 0.17] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_4448", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_d2ee59f3-468a-4eb4-a530-2babf9e4d776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.075, 0.16, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4449", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_9789cf64-f7e6-4e99-b1b4-77eb41a6e876.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[gridcell] Thu Jun 01 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.532, 0.743, 0.558] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4450", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_fdd7c5bb-257a-4e04-9762-7079f631669a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Canyon de Chelly\n[option] Canyon de Chelly National Monument -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.224, 0.854, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4451", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_5ab24f88-6b27-4bb0-8a0d-91e54d6f8dae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.36, 0.385, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4452", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_37e3a163-4d51-41c1-bd49-ec440145578d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.023, 0.553, 0.041] in the image\nAnd my action is Action: TYPE\nValue: bath towels"}]}, {"id": "mind2web_4453", "image": {"bytes": "", "path": "./images/41b8202c-9092-4307-ac58-2283c76df3b7_2adcf298-cf71-48ac-9531-fdc5708bd6a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hot Dogs in Oakland, CA that Offers Delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.023, 0.45, 0.036] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4454", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_72b18f17-346a-41ad-887f-c8a7dfb072a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.319, 0.473, 0.345] in the image\nAnd my action is Action: TYPE\nValue: 4"}]}, {"id": "mind2web_4455", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_62b32b28-721f-4b72-a3e8-a2b1483e66d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK\n[button] Romantic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.377, 0.772, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4456", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0dc80fc9-d1d3-48b8-abfa-8e7025ea84e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[checkbox] Refurbished Refurbished -> CLICK\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.282, 0.261, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4457", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_f3b29e1a-40cc-42f2-91d6-1d06d66f7941.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\n[searchbox] Search Site -> TYPE: projectors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.019, 0.546, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4458", "image": {"bytes": "", "path": "./images/6eeaa528-88a8-416f-94f2-ae1425d9c4a3_7fdff63e-288e-4dc3-b053-5253f6c23c15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a park in the state of California called Castle Mountains National Monument and find out it's Basic Information.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.358, 0.788, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4459", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_a32fbe21-0c34-441f-ad48-e12583c525a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.054, 0.702, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4460", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_88df9183-9dc1-4c06-9622-20981a4cf886.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.258, 0.454, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4461", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_7a035d11-468f-404b-b6d4-b45b72f78c6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.305, 0.702, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4462", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_17f036e1-04aa-4a66-828d-e19685efb75c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] Shoes -> CLICK\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.167, 0.947, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4463", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_0e2ae006-05fe-4806-bb31-742c673af29f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK\n[img] Women's Sonoma Goods For Life\u00ae Everyday V-Neck Tee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.338, 0.725, 0.348] in the image\nAnd my action is Action: TYPE\nValue: 10"}]}, {"id": "mind2web_4464", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_bd3e1205-bb50-4e1e-87ac-9e39b9b46b1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.266, 0.5, 0.307] in the image\nAnd my action is Action: TYPE\nValue: Harry Reid Intl Airport, LAS"}]}, {"id": "mind2web_4465", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_6a2988b2-cb53-4b88-8132-7cb3c86dee20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK\n[button] Apply Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.589, 0.5, 0.65] in the image\nAnd my action is Action: TYPE\nValue: jacksparrow@gmail.com"}]}, {"id": "mind2web_4466", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_c42e1fad-4d83-4494-bd83-247af16e8ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla\n[combobox] Style -> SELECT: L Sedan 4D\n[button] Select Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.17, 0.539, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4467", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_b70a73e5-9154-46c8-8498-5790c0807ec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: La Bomba\n[button] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.209, 0.259, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4468", "image": {"bytes": "", "path": "./images/85bd1881-6efd-458d-97c9-ae507ecba1ca_f74957d1-682d-41c3-a460-6813810fd440.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the odds for upcoming NHL matches.\nPrevious actions:\n[span] Odds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.737, 0.035, 0.758, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4469", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_9bf9f632-5dce-4566-8ce7-b94a42b1908e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.506, 0.297, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4470", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_ee5016b8-c8d1-4c30-9157-5cef60f94e71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.448, 0.153, 0.53, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4471", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_ac166cc9-fe74-40f0-8f14-eb93e372c3e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK\n[link] Exotic Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.104, 0.146, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4472", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_62d5d1f6-80a3-4d6b-93fc-18c08f34309b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.273, 0.95, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4473", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_92ca8e78-e5a3-407d-bea8-5a1000c3f54b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK\n[p] Skin, Hair & Nails -> CLICK\n[span] In-Person Care -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.511, 0.233, 0.518] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4474", "image": {"bytes": "", "path": "./images/e93fe82b-c3ed-4661-92f6-ea0abb1b2b63_f6247f9c-9e1b-43d9-a842-0ee512d1cbef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the page to find classic rock concert tickets.\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.084, 0.282, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4475", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_d51b4396-e234-4878-91a8-2e31706e71ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER\n[button] Go -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.524, 0.196, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4476", "image": {"bytes": "", "path": "./images/f296f6a6-dbb7-47dd-bd6f-4ac2336a7d28_4f3c0b77-487b-4fe7-b29a-8b691d8fd423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the French best sellers under $40 available in digital that are currently in stock.\nPrevious actions:\n[link] Bestsellers -> CLICK\n[select] All -> SELECT: US$20 to US$40\n[select] All -> SELECT: In stock (53476)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.185, 0.196, 0.207] in the image\nAnd my action is Action: SELECT\nValue: Digital"}]}, {"id": "mind2web_4477", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_c866b19d-d657-4385-9c9f-c43f7e09d2f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.375, 0.783, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4478", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_d90ea9f6-714f-4585-92f0-7e3eecf2e396.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.219, 0.259, 0.248] in the image\nAnd my action is Action: TYPE\nValue: Pune"}]}, {"id": "mind2web_4479", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_4ff39709-c42b-4174-ab7e-bbff789845f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.33, 0.42, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4480", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_64252790-add7-4554-9918-d7c39f24a67c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK\n[link] English Language: Reading & Writing Skills -> CLICK\n[select] All -> SELECT: In Stock (41,088)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.595, 0.196, 0.628] in the image\nAnd my action is Action: SELECT\nValue: Hardback (13,067)"}]}, {"id": "mind2web_4481", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_c2398364-0bf3-4627-8450-2d6b21c767c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.016] in the image\nAnd my action is Action: TYPE\nValue: rock"}]}, {"id": "mind2web_4482", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6a1d5aa2-cd89-47b8-83c1-7f435c7c4b3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[button] 20th March (Monday) -> CLICK\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.182, 0.522, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4483", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_f299ac63-fe5e-4e4a-b93c-db89bfcabadf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[group] RETURN -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.245, 0.391, 0.391, 0.414] in the image\nAnd my action is Action: SELECT\nValue: 5-15"}]}, {"id": "mind2web_4484", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_92044c3e-58bd-45b2-b161-6eea8af0c53f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york knicks\n[option] New York Knicks -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.244, 0.78, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4485", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2c1fe6d9-48b1-4b39-9d5d-09b14fb70ff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1\n[textbox] Event space -> TYPE: 7\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.881, 0.188, 0.958, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4486", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_08e49083-5052-42c3-b813-4591b4e718c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.138, 0.749, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4487", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_558a5ab4-4e4c-48e0-b00d-6ee44a495bae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.395, 0.037, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4488", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_73c4997a-aec5-4943-b19d-803a0e57ca5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[option] New York JFK International Airport JFK Jamaica, 11... -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] Next Month -> CLICK\n[button] 05/02/2023 -> CLICK\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.387, 0.432, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4489", "image": {"bytes": "", "path": "./images/c4ca9c7a-d31a-4b3b-b6b4-c442854e9da1_4fbee4b2-dd73-4f1f-b6e7-11092cc67c5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Gloomhaven's ratings?\nPrevious actions:\n[combobox] Search -> TYPE: gloomhaven"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.046, 0.986, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4490", "image": {"bytes": "", "path": "./images/2a8ae104-6f06-47cb-80a0-045188125868_57b4fe29-38c0-4171-8721-a773b02c3366.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Display details of new lanched iPad pro 11-inch\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.0, 0.291, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4491", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_c27a29c4-d67c-47ac-93f0-713b9aed25ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] To -> CLICK\n[div] Recent Stations -> CLICK\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.317, 0.791, 0.347] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_4492", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_ec602108-1535-425d-be37-bc2d202490c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.493, 0.754, 0.536] in the image\nAnd my action is Action: TYPE\nValue: allan.smith@gmail.com"}]}, {"id": "mind2web_4493", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_69f51f8b-8192-4638-beed-fcc9f187a9a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\n[span] Products -> CLICK\n[span] Premium Subscriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.428, 0.399, 0.572, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4494", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_78fab376-fd1d-406e-9818-c9ec36f48546.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.324, 0.872, 0.376] in the image\nAnd my action is Action: TYPE\nValue: 24124124091"}]}, {"id": "mind2web_4495", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_ad867891-0aa7-44b2-a033-6f297e36b85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. 
-> CLICK\n[combobox] Guests -> SELECT: 3 Guests\n[button] 5:00 PM Outdoor Table -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.329, 0.523, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4496", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_18511120-cc2f-421c-97a5-7ed7fc32cdb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150\n[textbox] Maximum price filter -> TYPE: 200\n[checkbox] list-filter-item-label-4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.113, 0.748, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4497", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_98338e7d-fa1c-4ab3-a522-00aa99888699.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.458, 0.512, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4498", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_83aba46c-31c6-4a64-bd2f-dfc6ce379419.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.515, 0.224, 0.783, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4499", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_5ec56760-b47d-4c6d-bdac-c3a6640b443a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK\n[textbox] Flight origin input -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.225, 0.573, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4500", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_4498554c-48ae-409f-ab30-cdd208c0ae22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK\n[textbox] Confirmation or ticket number* -> TYPE: 123456\n[textbox] Last name* -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.276, 0.478, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4501", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_e1d11f3d-bddc-40e4-9b38-e2ab641223d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[textbox] Enter a town, city or zipcode -> TYPE: Sheboygan\n[span] Sheboygan, WI -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK\n[gridcell] March 26, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.285, 0.831, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4502", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_ba8b539c-78b7-4c10-ad5d-a34107d8aa38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: pedicure salon\n[input] -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.408, 0.341, 0.585, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4503", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_5c9d8232-4c6b-4537-923a-dd0727032022.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my VIP tour at the Six Flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.35, 0.844, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4504", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_3c33b382-bf0c-4407-8f1d-128cd260334b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in New York City near Street Taco to start on May 6 that I can self-park\nPrevious actions:\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK\n[checkbox] Self Park (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.678, 0.452, 0.715] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4505", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_fb405733-e00b-4fc3-b323-85e0f0a76157.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated room for one adult in Los Angeles, CA between May 7 and May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: Los Angeles\n[span] , CA, USA -> CLICK\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.674, 0.192, 0.709, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4506", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_2dcc9e7f-6127-4495-9e4a-c3a5d59725f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK\n[div] SUVs -> CLICK\n[link] See Details -> CLICK\n[span] 11% -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.515, 0.662, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4507", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_971dc47e-71e7-475e-9eef-87d837b34356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Socks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.188, 0.959, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4508", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_18f516f8-beaa-4338-a0dd-659293279207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.409, 0.39, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4509", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_fad7bc53-f4c5-435a-abee-ee54d8595ecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.18, 0.442, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4510", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bf884456-1c0e-4856-8141-57bf30f5da56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Miami (MIA - Miami Intl.) 
Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.192, 0.984, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4511", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_c1fda10f-e5ef-47f2-852f-a6385a2ed99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email -> TYPE: smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.84, 0.678, 0.877] in the image\nAnd my action is Action: SELECT\nValue: Six Flags Magic Mountain / Los Angeles, CA"}]}, {"id": "mind2web_4512", "image": {"bytes": "", "path": "./images/0245df99-2909-465a-861e-7fbca948e82f_83e37cb8-d3a2-4121-a740-a1d75d3dd80e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show car guide info for economy car and reserve.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.02, 0.74, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4513", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_241b1896-c37f-452b-b824-73ff06f7df2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.055, 0.248, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4514", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_59b4a48b-83b0-4522-a66e-fec2fa0c6069.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.534, 0.469, 0.565] in the image\nAnd my action is Action: TYPE\nValue: SFO"}]}, {"id": "mind2web_4515", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_33a7ea3f-5ecb-4453-9465-1a9d3739e42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast-responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[button] Fast-responding -> CLICK\n[button] Open Now -> CLICK\n[button] Request a Quote -> CLICK\n[radio] Installation or replacement -> CLICK\n[radio] Light fixture -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.46, 0.722, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4516", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_dfdb363b-157c-44ca-9aad-be92a8572f15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.29, 0.31, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4517", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_d9e3b9bf-8a2c-41c6-9c58-d3f0ae781f45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.08, 0.203, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4518", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_a2078d5e-eaad-4060-a2e4-c26ecefb4a9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.314, 0.512, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4519", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_58fb22af-7875-4218-861b-bfca9cc56c7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.952, 0.183, 0.994, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4520", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_c0c80763-722c-4265-b734-24d8908b159c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew\n[textbox] Email address -> TYPE: johndoew@gmail.com\n[textbox] Phone number -> TYPE: 4533234565"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.514, 0.495, 0.531] in the image\nAnd my action is Action: TYPE\nValue: 234567895"}]}, {"id": "mind2web_4521", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_1b9a4422-dbca-4194-9891-f0fefedd9e5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK\n[textbox] Confirmation number (required) -> TYPE: 10000002\n[textbox] Last name (required) -> TYPE: Son"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.392, 0.691, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4522", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_cc2547c1-82f2-4e44-8419-61fc4536e234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] Fitted -> CLICK\n[div] Size -> CLICK\n[span] Now Trending -> CLICK\n[li] Newest -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.226, 0.716, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4523", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_40c833ba-e627-4bdc-9593-c749ee3807a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.111, 0.802, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4524", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_0d993232-3746-4084-95fc-0dd93e7de7f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK\n[link] 2-10 min -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.514, 0.212, 0.537] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4525", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a911199f-5c06-4042-8dff-d1d095ba7f21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER\n[link] select to navigate to New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.19, 0.352, 0.333, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4526", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_bc99632b-a2a3-4848-88cd-6917ff4e3596.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.139, 0.891, 0.16] in the image\nAnd my action is Action: SELECT\nValue: 3 00 AM"}]}, {"id": "mind2web_4527", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_4af813e6-fc10-468a-bc8e-cba17f545e06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.729, 0.678, 0.772] in the image\nAnd my action is Action: TYPE\nValue: Dick"}]}, {"id": "mind2web_4528", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_a3e5cb5c-e864-45c7-98d1-9518af067926.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with the most seats available at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Roanoke\n[span] Roanoke Regional Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.24, 0.567, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4529", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a86258c1-e7d5-4cb9-9e2f-2ed9966b5cf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK\n[button] DURATION -> CLICK\n[button] 10+ Days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.302, 0.871, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4530", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_42d37c11-0990-4059-8327-9f2c132e1b28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK\n[link] $10 to $25 (3) -> CLICK\n[link] $25 to $50 (18) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.397, 0.121, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4531", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_24f091f2-2302-45cf-8a3b-6926028a8c8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.168, 0.414, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4532", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_29c3163c-4c53-49b0-a0a1-49bc3b1e21ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.494, 0.509, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4533", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_f16dc8f3-48c9-43db-9468-9db70f01934a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Within sight of downtown Miami, yet worlds away, B... -> CLICK\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... -> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.748, 0.38, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4534", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_224ce086-9076-4f11-8961-b3ccb3285081.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. 
Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.873, 0.399, 0.941, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4535", "image": {"bytes": "", "path": "./images/f8428085-905f-4190-9404-3e28fb691252_314dfd22-8e83-4475-b8eb-430c8eb22cef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the newest on-demand releases.\nPrevious actions:\n[link] Visit the On Demand page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.106, 0.488, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4536", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_2755eead-c61e-4ce0-b14c-e041ca4d1562.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.146, 0.194, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4537", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_bb1ae489-b33e-475a-83fd-3ecabe7d1fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK\n[button] Atlanta -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: pizza"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.032, 0.659, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4538", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b740bff0-dc6c-49fc-8895-96b5959e3fc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK\n[link] Find Users -> CLICK\n[link] User Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.131, 0.614, 0.143] in the image\nAnd my action is Action: TYPE\nValue: Joe Bloggs"}]}, {"id": "mind2web_4539", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_7d650769-7360-42e5-9686-c24cfbaf2a2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Logitech -> CLICK\n[button] APPLY -> CLICK\n[textbox] price to -> TYPE: 70\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.132, 0.4, 0.145] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_4540", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_03937750-0402-4b65-b1c4-f83e6b72cca9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest train ticket on the upstairs deck with at least one window seat from Paris to Milan on March 26, departing after noon, for a 16-year-old and a 45-year-old.\nPrevious actions:\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.179, 0.926, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4541", "image": {"bytes": "", "path": "./images/4132002e-5ba6-4e36-a1b7-6bbe61503be5_a0ec89c8-5b88-4f8e-9547-e6f22bed7148.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a living history event to attend in April.\nPrevious actions:\n[button] Open Menu -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.595, 0.404, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4542", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_be4e1af7-e734-4d31-bba3-fd751a4fd8a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Most Popular TV Shows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.591, 0.673, 0.6] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4543", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_d02d1e0d-1558-49ea-a007-fd43a7560a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[link] NEW PLAYLIST -> CLICK\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.183, 0.699, 0.199] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_4544", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_6aa2eb7f-be4a-467b-b5c5-96e9dd543d22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.128, 0.58, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4545", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_df54b5a8-f70c-4695-9c8c-5780019eedb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK\n[img] Madonna -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.203, 0.297, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4546", "image": {"bytes": "", "path": "./images/f3850ec8-bf7c-42c3-9469-457836914f77_56682bee-956b-4d10-ab12-b895346b9589.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for events in Boston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.057, 0.464, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4547", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_2efe3302-5f5f-4b26-ba7b-7348f700afe8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.098, 0.129, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4548", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_a56e7cd6-7657-431b-8ae4-cb15032e2f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.6, 0.473, 0.62] in the image\nAnd my action is Action: TYPE\nValue: 6000"}]}, {"id": "mind2web_4549", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_68d2bf36-92a5-4fc5-a7c0-5c1f2fe3cffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.301, 0.191, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_4550", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_b2a2f8a2-4de6-48e2-bbf7-6eef4fff3631.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.138, 0.463, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4551", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_e7501c60-a8f3-453a-8f8b-bbb68c545ace.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Caribbean -> CLICK\n[button] SEARCH CRUISES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.121, 0.212, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4552", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_ed26e713-359e-4d11-b4ac-600a1d0d1610.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.441, 0.192, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4553", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_38057c1f-4752-4761-a83d-b914e6702b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[li] My trips -> CLICK\n[link] My trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.432, 0.426, 0.472] in the image\nAnd my action is Action: TYPE\nValue: Atas"}]}, {"id": "mind2web_4554", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2151f674-7324-4d95-a39d-4fdf73e0b0ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.311, 0.312, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4555", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_e474adb2-ec08-4464-b477-30f533b43209.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.082, 0.957, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4556", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_017f0e17-837f-451d-aed3-0c99dd21581f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4557", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_74ff6222-caf3-40c2-abc4-2ec3029d571e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.676, 0.368, 0.694] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4558", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_50b39168-3d21-4d0d-8664-8a507729784e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.026, 0.553, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4559", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_eff68b62-ec7c-4049-8586-0a770d5b987d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.192, 0.711, 0.233] in the image\nAnd my action is Action: TYPE\nValue: 11101"}]}, {"id": "mind2web_4560", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_a94ff5fd-45f1-46e0-bfba-90fd5f6dc7d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.048, 0.664, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4561", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_cba1e975-0bdf-4726-b146-be1142353fe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK\n[link] Insurance -> CLICK\n[textbox] Ask a Question -> TYPE: Health Insurance Top Up plans beneficial or not?"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.18, 0.588, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4562", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_5e676867-a433-4845-8aa2-777f5e66b86e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[button] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.264, 0.943, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4563", "image": {"bytes": "", "path": "./images/932c0ec6-d500-495a-a7a7-0f632acbf6a5_e9b80c7a-04d6-40ee-87d9-c678b93317d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse camping stoves that have an auto ignition feature.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.051, 0.128, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4564", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_85649f4f-ff05-45bf-870c-6154412c5750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.273, 0.611, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4565", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_d0df5da7-08b6-4ba6-a359-e6f4de52d074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Airfare Included -> CLICK\n[button] All-inclusive -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.304, 0.772, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4566", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_45d4c97a-1f19-4b89-9069-3f4820b8484d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, OH, US Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.218, 0.84, 0.238] in the image\nAnd my action is Action: SELECT\nValue: 18"}]}, {"id": "mind2web_4567", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_7e8dffb8-17a7-40c5-9344-b115886fd488.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK\n[link] Stats \ue00d -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.104, 0.403, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4568", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_7412202e-0c5f-47b7-a72f-0570cd883473.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[textbox] Where to? -> TYPE: Los Angeles\n[b] Los Angeles -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.502, 0.556, 0.531] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4569", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_56328637-4511-4d5f-87dd-f73738934bf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.008, 0.83, 0.022] in the image\nAnd my action is Action: TYPE\nValue: oak grove station"}]}, {"id": "mind2web_4570", "image": {"bytes": "", "path": "./images/5b433cc4-26bf-4e62-b406-f00dc09c274d_eb2f0b10-9e1a-410a-b238-358836e1ed04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a CVS brand covid home test kit to the cart.\nPrevious actions:\n[img] -> CLICK\n[span] Shop all at-home COVID-19 tests -> CLICK\n[div] CVS Health At Home COVID-19 Test Kit, 2 CT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.828, 0.219, 0.969, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4571", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_03783199-4419-495a-897f-12d1d1e5b7f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: LONDON\n[span] London Paddington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.299, 0.194, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4572", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_236326dd-e3a4-4b2a-98b0-a495fe869504.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! 
below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.086, 0.957, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4573", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_88b3d2aa-9a00-423e-9dcf-8527c310e228.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK\n[link] Route Map -> CLICK\n[textbox] From -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.106, 0.28, 0.158] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_4574", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_1f3155ce-428a-4cd1-bb4a-b7fbf7469ddd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.008, 0.917, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4575", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_39e3abcc-6e56-4032-b225-9e56cbd89bb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.236, 0.686, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4576", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_71afffe2-cba0-43d4-abc9-095a2bcd083a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[link] More -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.075, 0.449, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4577", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_897fecac-fd54-4b7f-bfeb-5ed4dcc72950.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.257, 0.187, 0.51, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4578", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_ccf8e5a6-cc0b-46b9-93a9-a725eb195bc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.046, 0.271, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4579", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_d77a38e2-0f86-4a9e-8466-acaa6d9b8aa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Weekend -> CLICK\n[div] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.129, 0.819, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4580", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_9284698b-67c4-43a4-8150-0bf06a0fd54e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.338, 0.476, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4581", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_c6088520-5a6b-4e2e-bbdb-d9a7e1f5a605.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.265, 0.568, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4582", "image": {"bytes": "", "path": "./images/da386775-280b-4a84-9801-4ae3098044b0_8b42d9a9-7e40-4030-bda3-b84edc4d852b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in California city for Limos which also offers military discounts and free wi-fi.\nPrevious actions:\n[link] Auto Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.485, 0.217, 0.587, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4583", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_ad546f94-f9c4-4693-9e29-6dab15f82b4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.091, 0.769, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4584", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_98aca18e-749f-4dcb-a26a-02f3c7b20917.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.053, 0.153, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4585", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_2dc6fa55-e5aa-4511-92b2-71dde630cdcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\n[textbox] Enter artist name or song title -> TYPE: ukulele"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.07, 0.897, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4586", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_6ae8e6ca-ed17-4af9-937c-cd2666364100.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[span] Airport taxis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.166, 0.284, 0.192] in the image\nAnd my action is Action: TYPE\nValue: O'hare Airport"}]}, {"id": "mind2web_4587", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_2a57a0b2-0e58-4fd9-b6b2-eaf59e4e6d5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.101, 0.391, 0.118] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_4588", "image": {"bytes": "", "path": "./images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_de075009-e20b-4800-8460-2bb57f6db9c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are today's Limited Time Offers deals?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.523, 0.082, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4589", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e50a5cc2-36cd-44a5-8540-32d37ae310bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK\n[radio] Confirmation or eTicket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.244, 0.271, 0.269] in the image\nAnd my action is Action: TYPE\nValue: 12345678"}]}, {"id": "mind2web_4590", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_32a87b2c-3e26-45ba-9fe9-32c4dc3949b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Kevin Durant\n[link] Kevin Durant Phoenix Suns -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.228, 0.164, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4591", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_c09c8617-efb7-47b0-b638-3aa6dab6eb6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[link] Customer Service -> CLICK\n[div] Need an extra part? 
-> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK\n[input] -> TYPE: 105307"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.302, 0.617, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4592", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_3bce3264-f5ca-4d47-9ab5-95af75dd15ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK\n[checkbox] RV Site -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.106, 0.434, 0.122] in the image\nAnd my action is Action: TYPE\nValue: California"}]}, {"id": "mind2web_4593", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_6f9d42c6-bb53-4235-aae9-30a81afc7180.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.13, 0.43, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4594", "image": {"bytes": "", "path": "./images/2b562465-e325-4743-8e68-6e7852594f93_02557082-babe-4a38-a66a-4b2f4a170b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the terms and conditions of Amtrak RideReserve for Multi-Ride Passes\nPrevious actions:\n[button] DEALS -> CLICK\n[link] MULTI-RIDES & RAIL PASSES USA Rail passes, monthly... -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.064, 0.494, 0.299, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4595", "image": {"bytes": "", "path": "./images/7f1f085b-5765-40f8-86c7-8df6e8b68053_43ab932b-04e2-4282-86c2-2e7af016655b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about baggage allowance for business class.\nPrevious actions:\n[button] Experience -> CLICK\n[link] Baggage allowance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.488, 0.641, 0.569] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4596", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_9311ff44-ee59-4214-a920-2b5fb38d43f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.152, 0.568, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4597", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_766c0830-7e36-42fb-8f3e-9473f6322736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue\n[textbox] City Name -> TYPE: New York\n[select] Alabama -> SELECT: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.49, 0.685, 0.518] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_4598", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_4da18a4b-7a48-4342-aced-13ac7ea17785.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Las Vegas\n[span] Las Vegas, Nevada, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.128, 0.964, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4599", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_da185646-5517-4406-ad3b-28bae9edf30a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[link] Categories -> CLICK\n[span] Luggage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.238, 0.316, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4600", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_8fd55a42-471e-4418-b2f5-bca74ede84ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.07, 0.374, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4601", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_68c2b8e3-b806-4602-ac7d-027a7865a754.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[div] Anywhere -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK\n[combobox] How many guests? 
-> SELECT: 1 Guest"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.066, 0.964, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4602", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_9cbdc533-8352-4da3-b64c-bdc59d0517a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.279, 0.492, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4603", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_b6170a50-fd4d-4d7c-930f-66231442bda0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.757, 0.583, 0.764] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4604", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_16c54e19-5ee4-4204-9d47-a622771a3506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.17, 0.794, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4605", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_62b92129-4035-4743-aef3-0b72cc301caa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.238, 0.077, 0.381, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4606", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_016e7d79-50f7-4e96-b822-70b91f99a2e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK\n[button] filter by services -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.108, 0.62, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4607", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_0f034570-81d0-41ed-9f4e-e3ad4241112b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.227, 0.925, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4608", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_ca152b05-af0c-47e0-8958-bce808d51e93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 500"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.404, 0.786, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4609", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_81b1149d-7ff0-4e12-a33c-f093e82f71de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.017, 0.509, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4610", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_84042e46-ecd1-428d-b72c-53232329ed7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK\n[link] add filter: 6(220) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.296, 0.142, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4611", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_be6f3ede-4d0a-4a03-8f49-78f91329c5e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.128, 0.274, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4612", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_538f32f7-02af-4098-b8e7-d1861bd5819f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.121, 0.83, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4613", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_a8ec1dff-5f2e-4bf7-be21-9a534e37ac41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.451, 0.887, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4614", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_fb6edc44-52b6-4f64-8245-8ce967249d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.296, 0.087, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4615", "image": {"bytes": "", "path": "./images/6df317e6-3414-4f2e-b5fc-b70914def4eb_3a79506e-4983-4f73-800e-97010e8017a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Jazz music genre albums and tracks.\nPrevious actions:\n[link] Music -> CLICK\n[link] jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.576, 0.219, 0.645, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4616", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_1fad7c31-f747-4e0f-b2c7-99e5a4febcfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.402, 0.414, 0.441] in the image\nAnd my action is Action: TYPE\nValue: Las Vegas"}]}, {"id": "mind2web_4617", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_3359fd6e-131e-481b-8a7f-dad00e69757b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.455, 0.83, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4618", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d3c88cc2-8226-4464-b356-e448c7a3e5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.173, 0.325, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4619", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_9a556b99-9709-438d-8d96-73c977afe480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.196, 0.282, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4620", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_4885ec31-8249-4992-8ce2-c661f339be98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] Sign up for job posting alerts -> CLICK\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.193, 0.977, 0.232] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_4621", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_6a0e5751-e659-44d8-b355-64280b94b4a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.595, 0.489, 0.855, 0.551] in the image\nAnd my action is Action: SELECT\nValue: Next month"}]}, {"id": "mind2web_4622", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_254d076a-6aec-4696-b23f-a83c21573d62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.543, 0.012, 0.584, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4623", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_d28d30a1-9e44-4374-aa29-49d616e71df2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.804, 0.333, 0.822] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4624", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_79dd5af5-6248-4261-916d-6a5be124e417.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.04, 0.333, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4625", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_23a34dd1-6a2d-4b3b-b0ae-bd4472286e89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John\n[textbox] last name maxlimit is 30 -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.369, 0.354, 0.4] in the image\nAnd my action is Action: SELECT\nValue: Ticket Number"}]}, {"id": "mind2web_4626", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_d684a2b4-143e-4851-b9e2-6b1bdef467fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.295, 0.595, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4627", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_6ecf4fe7-562e-42cd-8cb4-871246f9d45a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[span] Mar 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.394, 0.351, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4628", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_d147dd83-a0b3-4263-9db4-30b58e266a21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.035, 0.668, 0.064] in the image\nAnd my action is Action: TYPE\nValue: Kayaks"}]}, {"id": "mind2web_4629", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_7a6574e9-178c-4a06-8a4f-3854f3d5279c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.032, 0.673, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4630", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_495d586d-b1a2-41e0-a289-1abc2365840e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[img] -> CLICK\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... -> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.322, 0.352, 0.341] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_4631", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_644f0928-6069-4c8e-9ed7-51ec7e259184.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.288, 0.077, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4632", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_0495b180-3090-4b4a-901c-07cd307f9e82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.283, 0.639, 0.303] in the image\nAnd my action is Action: TYPE\nValue: Cincinnati"}]}, {"id": "mind2web_4633", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_9a0d4689-c0ba-46ce-acd9-03b108d9dd8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK\n[li] -> CLICK\n[spinbutton] Max Price -> TYPE: 75"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.224, 0.217, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4634", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_2212f16a-7a5d-446c-a124-7afa61604d92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW\n[combobox] Select Model -> SELECT: i3\n[textbox] Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.526, 0.324, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4635", "image": {"bytes": "", "path": "./images/f408cdf3-06c8-459c-ba08-71bd471341a0_c854a73c-ba42-4f57-b19b-514c037bdf3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find places that serve burgers in 44012 zip code and sort the results by highest rated\nPrevious actions:\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: Burgers\n[span] Burgers -> CLICK\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 44012"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.011, 0.62, 0.032] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4636", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_ba18ee7c-dbe9-4345-b2c9-b4f7b2fa559a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.596, 0.25, 0.746, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4637", "image": {"bytes": "", "path": "./images/1f84888a-bebf-45aa-b8e3-2d9383ff01d3_ada350af-59cc-4e4c-aedb-8b128a8ee14e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated chords for the song La Bomba\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.03, 0.838, 0.045] in the image\nAnd my action is Action: TYPE\nValue: La Bomba"}]}, {"id": "mind2web_4638", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3407d7f3-e070-45e0-8ee6-cb9b2512b40c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.15, 0.43, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4639", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_785d0f3e-72f0-4f03-80cd-dc73dcf41af2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.062, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4640", "image": {"bytes": "", "path": "./images/d637c171-dc6e-4a4e-a162-9c230e822932_3cdab44c-9799-48ba-a720-3dc25eb00579.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show popular news which is at number one in comics.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.094, 0.047, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4641", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_6ef520ac-7d19-4d52-852c-5a128b13cc40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.054, 0.532, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4642", "image": {"bytes": "", "path": "./images/3236b068-8eaf-4a39-913f-b71884a35c39_dea7fb0b-b89c-4513-b5f3-9156a5463b72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most played games sorted by daily player count.\nPrevious actions:\n[link] New & Noteworthy -> CLICK\n[link] Most Played -> CLICK\n[generic] By Current Players -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.213, 0.543, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4643", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_bb6ec3fd-7f24-4864-a4cc-1f7df779b2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.306, 0.463, 0.317] in the image\nAnd my action is Action: TYPE\nValue: skiing"}]}, {"id": "mind2web_4644", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_99759d39-96b3-4093-881d-b50db542dd56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.005, 0.675, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4645", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_2fe0fae8-1e67-4bfa-92d5-61c9c7eb65ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.172, 0.481, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4646", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_2188f829-c7c5-4e97-b301-26caa57486ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.048, 0.129, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4647", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_3f033257-fd1c-4875-8b86-4f0b4cd589c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Women -> HOVER\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.228, 0.233, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4648", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f8928c84-d27a-42db-a6d1-dcd1f656d6ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.219, 0.159, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4649", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_f444dc13-937a-4b04-8052-da002702db08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gifting -> CLICK\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. 
-> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.448, 0.48, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4650", "image": {"bytes": "", "path": "./images/87989b8e-0b6a-4dbe-a8bf-6adc2bdf3c29_3de2c511-3989-41b9-8f34-d4cb2d3853b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an hourly parking at Atlanta International Airport.\nPrevious actions:\n[tab] Hourly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.202, 0.73, 0.229] in the image\nAnd my action is Action: TYPE\nValue: Atlanta International Airport"}]}, {"id": "mind2web_4651", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_1e255fc2-932c-41e6-b97e-1d1c15da28bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[textbox] Pick-up -> TYPE: San Francisco\n[button] San Francisco California, United States -> CLICK\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.338, 0.568, 0.37] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4652", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_36028024-168b-4da8-a0fc-6452fcb120f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.214, 0.234, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4653", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_e95cc523-03ff-48de-a9d7-0f07b4906ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7\n[tab] 7 -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.219, 0.193, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4654", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_4f0c10f1-48d0-4d3a-af3b-f9d37f97fc96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.107, 0.43, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4655", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_c5e5e76f-39da-440f-a771-be5ef5b7e0c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6\n[combobox] Select Year -> SELECT: 2020"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.151, 0.624, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4656", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_9efe842d-9955-4569-9692-f96a5edd3d49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\n[textbox] Search for parking -> TYPE: stripe, 5th avenue\n[li] Stripe, 5th Avenue, Seattle, WA, USA -> CLICK\n[tab] Monthly -> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.37, 0.397, 0.383] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_4657", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_5dea1dea-02c6-4442-845b-c06ff9529037.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\n[textbox] Search -> TYPE: black sleeping bag\n[button] Search -> CLICK\n[textbox] Upper Bound -> TYPE: 40\n[textbox] Lower Bound -> TYPE: 0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.692, 0.073, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4658", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_093733bb-a6a3-48b0-9aa5-bbe2a4b258aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.219, 0.484, 0.247] in the image\nAnd my action is Action: TYPE\nValue: Allan"}]}, {"id": "mind2web_4659", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_158b6c1d-31c1-4888-ae71-7f8cb5bebcd0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as he\nlpful\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[b] Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.266, 0.777, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4660", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_88c22cbc-3eaa-4221-8790-92ab48659205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES\n[input] -> TYPE: smith\n[input] -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.415, 0.416, 0.585, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4661", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_05faad15-74f4-4e7d-b3ec-1fecd007f9d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK\n[div] Sail From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.689, 0.09, 0.82, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4662", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_cb1c3f11-8fe0-41ee-bb51-2e8061bdfc57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Activity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.524, 0.382, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4663", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_f8725f0b-18d9-4b4c-845d-ad4fcd1a9d6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[label] Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.144, 0.609, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4664", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_09291760-75ab-4a52-b1a3-763fd1ac9e02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK\n[select] DURATION -> SELECT: 2 - 5 Days"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.202, 0.969, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4665", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_0cef9097-2798-4c61-bc77-7ed372327135.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Back -> CLICK\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.48, 0.309, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4666", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_c2addf44-300d-4f7e-9bec-b2b5471e0d2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.257, 0.17, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4667", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_d55bc99d-7725-453e-b01d-c0cd6d36e985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[img] Add -> CLICK\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.217, 0.264, 0.229] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_4668", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_0c9c1694-75c7-446b-b8a1-4585a8561f79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.035, 0.938, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4669", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_1bb44b82-30be-4dc1-910e-458594103813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.004, 0.791, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4670", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_b847be3f-0e83-44b6-900e-cd2c4d162f97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK\n[link] Plan my Trip - Press enter key to submit the form ... -> CLICK\n[tab] Fastest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.324, 0.332, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4671", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_f3309336-f65d-423d-943f-296c3d7a3b97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.147, 0.338, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4672", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_ee795994-e62a-47c3-a705-bb02487f3c6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\n[link] MOVIES -> CLICK\n[button] IN THEATERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.238, 0.672, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4673", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_2ea9701a-ab3d-4dbc-a9b4-bf8f615fe651.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] SOCCER -> CLICK\n[a] FEATURED MATCHES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.213, 0.41, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4674", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_9bb22c1b-f45d-478a-bf4f-1a018c576906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] From -> TYPE: empire state building\n[listitem] Empire State Building, West 34th Street, New York,... -> CLICK\n[searchbox] To -> CLICK\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.335, 0.359, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4675", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_68993e22-ae92-47b0-9712-e4e67c7c657e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: xbox 360 games"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.055, 0.645, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4676", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a9216e09-9cdf-4e76-961a-972569f94327.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] 30th March (Thursday) -> CLICK\n[combobox] Time -> SELECT: 8:15 PM\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.345, 0.663, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4677", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_639a3c92-a608-447c-9f65-176900f37e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: CHIANTI\n[combobox] Guests -> SELECT: 4 Guests\n[button] March 30, 2023. Selected date. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.275, 0.466, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4678", "image": {"bytes": "", "path": "./images/63d1f820-37bf-4adb-aabb-65eb7925790c_73adafc4-ae3f-4cc6-89c4-64510b8e7910.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the current roster of the Miami Heat.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.066, 0.335, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4679", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_f8dc3296-ee92-4059-922e-f380c7f8a6ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK\n[checkbox] list-filter-item-label-4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.15, 0.089, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4680", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_af68a13f-cead-4bad-9b91-ac1fbc14b005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Health -> CLICK\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.284, 0.363, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4681", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_90d3a2c2-9fdb-4edf-ad99-15fd086454e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.023, 0.232, 0.037] in the image\nAnd my action is Action: TYPE\nValue: thai restaurants"}]}, {"id": "mind2web_4682", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_4a697de0-aa51-43f3-ad3e-d3312265bd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504\n[button] Look up -> CLICK\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.497, 0.24, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4683", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_ba1cf05e-362c-499a-bd3a-1fbc5d649325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK\n[div] -> CLICK\n[button] Form -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.439, 0.143, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4684", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_b0d8ab74-8854-487a-805c-2f920b08af49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.555, 0.131, 0.643, 0.17] in the image\nAnd my action is Action: SELECT\nValue: 2 00 PM"}]}, {"id": "mind2web_4685", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_60fe2dd5-eaff-4563-9cf3-dd946f846edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[link] Search for cars -> CLICK\n[svg] -> CLICK\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.131, 0.573, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4686", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_56ec5ed4-9bd9-4caa-9fd3-21b38487f195.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... 
-> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.304, 0.452, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4687", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_75b39e1c-d143-4ade-8ba1-6ebad9aacff8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. -> CLICK\n[button] Saturday, April 29, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.059, 0.807, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4688", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_362a4918-c9e4-43c7-b7c7-e6c5ab3b2f67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.094, 0.753, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4689", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_bb5a4639-d62c-4155-8099-1ebe298b6bbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[button] Pick-up date April 2, 2023 -> CLICK\n[button] Apr 8, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.209, 0.045, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4690", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_50d10c0a-7be8-4680-97b3-b7047b61e733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.814, 0.284, 0.837] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4691", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_78cd3d9b-a495-4c39-9102-2dc14b522e61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.117, 0.285, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4692", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_b0412512-a7db-4bf6-9cf1-06586305037f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.042, 0.546, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4693", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_88e0ed6a-8359-41a6-99d4-e55bd567816c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search by location -> TYPE: Seattle, WA\n[button] Search by location -> CLICK\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.243, 0.462, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4694", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_a9210afa-f255-4b10-9dc0-401b91e86fb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Ages 6-8 -> CLICK\n[link] Education -> CLICK\n[span] English Language -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.153, 0.196, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4695", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_a451cb2d-a5e8-4808-88cb-c026cbda67da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK\n[gridcell] Color -> CLICK\n[label] YELLOW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.25, 0.516, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4696", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_3a7df161-0055-43f8-a7b3-0705eb5f73a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.089, 0.487, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4697", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_a309368f-6646-468c-8039-1867c9223c7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK\n[link] Insurance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.155, 0.602, 0.175] in the image\nAnd my action is Action: TYPE\nValue: Health Insurance Top Up plans beneficial or not?"}]}, {"id": "mind2web_4698", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_8cd6279c-0398-4d6d-8efd-cc77ddc492c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.051, 0.082, 0.073] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4699", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_caecd46f-1c1b-4494-bb4f-2d64fa469b04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.053, 0.223, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4700", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_5df012f2-ae71-4ede-b641-41f8e3e454f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: New York\n[option] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.794, 0.264, 0.864] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4701", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_b9fe007b-b768-4fd2-96c9-21e99e1fc443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.011, 0.418, 0.035] in the image\nAnd my action is Action: TYPE\nValue: wireless printer"}]}, {"id": "mind2web_4702", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_cdc883f2-d336-484b-88e6-badadb5a758f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[textbox] Pick up -> TYPE: Houston\n[a] Houston, US -> CLICK\n[button] Search -> CLICK\n[button] Economy cars 5\u00a0Seats 1 Large bag 1 Small bag From ... -> CLICK\n[p] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.389, 0.917, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4703", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_475e620f-02b0-4a95-ba4b-bce28ab58e23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.308, 0.675, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4704", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_aa7bf5bf-a02a-47e5-8a36-dbdb3b463d2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.472, 0.153, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4705", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_1d0e7048-7e14-408c-b0ec-c8d6a200c859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.351, 0.317, 0.634, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4706", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_cb270e9a-7513-44a0-992e-1db9994bb336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.077, 0.556, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4707", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_ba11d9d1-a12c-4fea-99a3-b8c63f58c538.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.451, 0.034, 0.551, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4708", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_149a80ed-1e2c-4a63-941f-99a5fec6a11e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.024, 0.45, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4709", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_6fcf3fd4-8f58-4bd7-8267-d4fa5cd2b6e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.585, 0.922, 0.613] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4710", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_1ec386a0-9145-4cc5-ad86-8df75e74fa30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK\n[button] Save to favorites, KUDDARNA, Chair pad, outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.349, 0.384, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4711", "image": {"bytes": "", "path": "./images/cdbd410d-170a-426d-b6d2-60dafaffe853_e7cce11b-e3ef-43c9-bc41-3f6db08f9d2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the best seller accessories for the Model X\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.054, 0.753, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4712", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_82693ec7-34ef-40f8-b3b7-daca962c2a76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.014, 0.211, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4713", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_5c046918-7e8b-44d4-a49e-521c81e5d12b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: MILAN\n[span] Milano (Milan) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.167, 0.194, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4714", "image": {"bytes": "", "path": "./images/1b82bda7-a360-49c4-b54a-adaa1ae388cb_5e729cf2-659d-4934-bd09-364bcb174861.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the birth place of Ali Wong.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Ali Wong"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.314, 0.595, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4715", "image": {"bytes": "", "path": "./images/f385156c-4f2e-410f-bc73-7ec6d0f44448_8d193167-cec0-4e41-b471-99c194209723.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare all membership tier benefits.\nPrevious actions:\n[rect] -> CLICK\n[textbox] e.g.: New York -> TYPE: Membership tier\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.376, 0.295, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4716", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_7dec7f53-f08b-4658-993a-b6121c95d246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK\n[button] Discount -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.079, 0.834, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4717", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_bf438b4c-9dbc-4369-82fb-005f6e63e14c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.048, 0.378, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4718", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_2c9414e4-66fa-4d75-befd-bfffdfcb6497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.574, 0.263, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4719", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_66327fbb-2be0-43d0-9f9e-d4776f150711.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.236, 0.668, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4720", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_db324c07-d4c8-4133-bf12-0b1be073d6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.268, 0.23, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4721", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_2a577542-e4e4-4c33-b14b-3f01975609da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[combobox] Size -> SELECT: 10 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.534, 0.095, 0.666, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4722", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_aed97d15-86ae-44d5-b329-3b10758f50bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK\n[div] -> CLICK\n[button] Contact -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.298, 0.48, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4723", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_b9c1f781-bdec-4323-a3d6-2930774d05bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> TYPE: Chicago Bulls\n[div] Chicago Bulls -> CLICK\n[heading] SCHEDULE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.101, 0.715, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4724", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_4bf5478b-1450-424b-80a6-b2acb4798bd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.419, 0.12, 0.45] in the image\nAnd my action is Action: SELECT\nValue: 2006"}]}, {"id": "mind2web_4725", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_7404da48-043c-4f90-99cf-3a2e99a7bf80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.197, 0.968, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4726", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_dd54cc31-18ee-44a1-92e7-fdef1940d932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.049, 0.628, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4727", "image": {"bytes": "", "path": "./images/3c098275-ff82-48b1-bd00-6313b4caf215_b933a80f-1cc1-4d21-86dd-2e4e50ddaa91.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car rentals in San Francisco for a weekend trip and filter the results by the rental company Avis and car type Economy.\nPrevious actions:\n[tab] Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.148, 0.495, 0.186] in the image\nAnd my action is Action: TYPE\nValue: San Francisco"}]}, {"id": "mind2web_4728", "image": {"bytes": "", "path": "./images/779cec8e-eef5-4de8-a42e-b449363664df_d5585212-4b78-49b6-8185-eae5dec350f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a theatre near 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.021, 0.443, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4729", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_e73d92f8-b366-4344-9c6a-f8e671a75728.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[div] Shanghai, China -> CLICK\n[textbox] Where to? -> TYPE: SEOUL\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.319, 0.799, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4730", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_a885209f-e3dc-4d1b-a292-b3631c292916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.41, 0.748, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4731", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_b93b8c53-e686-471d-bcf7-ed74fe2190a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK\n[button] Member Rate Prepay Non-refundable -> CLICK\n[label] I have read the rate details and accept the cancel... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.501, 0.647, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4732", "image": {"bytes": "", "path": "./images/9e9d7935-0c16-46e6-9e5c-3ea9124a6bf7_b82f1731-57ab-4129-9d15-c006e6895af7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the top 50 hip hop chart and play the top track, then add this track to a newly created private playlist named Top Hip Hop.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Top Hip Hop\n[div] -> CLICK\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.696, 0.066, 0.712, 0.083] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4733", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_888fcb67-1235-4bb3-9cdc-b96d07a8dc10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.033, 0.178, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4734", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_c4146be9-5d24-4618-975d-5ebfba34bf9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.017, 0.335, 0.027] in the image\nAnd my action is Action: TYPE\nValue: electrician"}]}, {"id": "mind2web_4735", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_658d1d45-7bab-4d3a-8ece-e898e819cc5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas\n[button] Search -> CLICK\n[button] Open Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.162, 0.621, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4736", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_c269bd1c-a1bb-485b-9fc9-5eaca199ab2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: kashi vishwanath temple"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.12, 0.573, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4737", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_73d19c95-32f0-4eed-ac23-4de4484fa210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Show filter modal Category -> CLICK\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.318, 0.121, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4738", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_c280a5bd-f3af-43ab-a64b-29e6984be6b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.643, 0.159, 0.654] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4739", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_6860793b-b094-4e1f-88c6-07680327486c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] From -> TYPE: TEL AVIV\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.253, 0.131, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4740", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c27e0edb-166a-4fca-afd7-de2a8823e3dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK\n[button] Show 10 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.396, 0.397, 0.409] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_4741", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_6928e47d-af7a-4f49-89b0-1b72a2516909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Alien Worlds\n[div] Alien Worlds -> CLICK\n[link] Seasons & Episodes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.592, 0.129, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4742", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_e7771434-c972-4fb1-9915-82fd77e5ec6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.273, 0.222, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4743", "image": {"bytes": "", "path": "./images/6b831239-435b-494e-9aa8-a49e8605d0b3_97df459e-9422-40ca-88fc-0a6f15b4fbfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is trending now on AMC on-demand?\nPrevious actions:\n[link] Visit the On Demand page -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.525, 0.079, 0.729, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4744", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_1e69c4c2-c2a1-4bb3-b5ad-3a4c6b19dd76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.836, 0.072, 0.925, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4745", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d68603fa-5c88-4cc0-a276-31fbc5052bd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK\n[img] 8GB (1x8GB) DDR3L 1600 (PC3L-12800) Desktop Memory... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.807, 0.206, 0.963, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4746", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_87f3b880-2540-4b64-b688-f10fca9ea957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.027, 0.292, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4747", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6d69a62b-9667-42ef-bc20-3c552c0e2e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[div] -> CLICK\n[checkbox] SUV -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.631, 0.241, 0.662] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4748", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_ac795b03-c8fc-4cdb-9ed7-600429a37873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.108, 0.339, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4749", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_3ab94658-7eb5-4f3f-9a47-d87421d1a4d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK\n[button] 1 Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.343, 0.481, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4750", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_d694c8b7-923e-4d3b-97ec-3a475e4463f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.113, 0.571, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4751", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_046e8cef-b409-41ce-a840-3daf9c4f05a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK\n[textbox] Near -> TYPE: WESTMINSTER\n[span] Westminster -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.136, 0.682, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4752", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_b66287f6-86b3-4e91-97b5-be53a7338c72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.139, 0.127, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4753", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_f55421a2-42c3-4ceb-934b-620dae199c4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.123, 0.902, 0.176] in the image\nAnd my action is Action: TYPE\nValue: Fiji"}]}, {"id": "mind2web_4754", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_a1884569-f749-4e3d-96b6-2808db697b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.153, 0.0, 0.191, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4755", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_fa7ae12a-1f5d-4463-820e-4ff9e9211281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.191, 0.76, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4756", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_a18c0387-6182-447c-99ba-74444b28d91b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.322, 0.688, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4757", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_15214483-2534-48e2-bb40-d84e4daf3540.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.218, 0.442, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4758", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_0a333e24-0a4d-4c6c-a466-3a1807f60957.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK\n[div] Condition -> CLICK\n[checkbox] Refurbished Refurbished -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.642, 0.375, 0.688] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4759", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_dae9e143-9012-43de-aa95-496ce9cabb17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK\n[searchbox] Location (required) * Required Field -> TYPE: 02199\n[span] 02199 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.158, 0.287, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4760", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_725df4d5-bb6a-418d-a6d5-1dc6fd8cc328.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.256, 0.095, 0.47, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4761", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_adcc697d-c4b8-4329-8efb-83af89c3ad55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: MANILA\n[option] Manila Luzon,\u00a0Philippines -> CLICK\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.371, 0.93, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4762", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_9981ef68-97b3-4388-906b-0b285e5b74f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.009, 0.546, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4763", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_ade61336-8901-48d1-9c8a-f14332ed9aa2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2018 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.35, 0.234, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4764", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_0bc62f29-131d-4a0f-a05b-5bb6471dd1b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.667, 0.284, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4765", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_10249103-9f3d-4098-9ea7-b80db7f8af9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK\n[div] Need an extra part? -> CLICK\n[link] Read more -> CLICK\n[span] Order spare parts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.246, 0.714, 0.278] in the image\nAnd my action is Action: TYPE\nValue: 105307"}]}, {"id": "mind2web_4766", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_ddc83651-4e21-47b3-8ccc-50fc0d5e4783.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK\n[textbox] Order number * -> TYPE: X123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.341, 0.766, 0.372] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_4767", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_be3a6710-98f4-4b0c-8508-ef55b5dca3d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.296, 0.332, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4768", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_40470783-4757-4eac-a28c-fa1bfa9a8517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[span] , United States -> CLICK\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.193, 0.524, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4769", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_f14aee92-2270-4ef1-a4f2-6f3c03627989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK\n[label] Free Cancellation -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.254, 0.926, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4770", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_9e12befd-5920-4a3a-a8c7-6f47a1a13b4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.287, 0.927, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4771", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ae57d1a1-a97c-4a44-aec7-1f1d6bd7c8d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.656, 0.308, 0.672] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4772", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_ff1cb425-d1e1-4bf8-8ac2-2e6ef4dbf5d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Locations -> CLICK\n[link] United States Car Rental Locations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.288, 0.666, 0.323] in the image\nAnd my action is Action: TYPE\nValue: 02199"}]}, {"id": "mind2web_4773", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_145d33f0-a819-4a3b-b6d3-3ae7980c8dda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK\n[button] Atlanta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.038, 0.424, 0.085] in the image\nAnd my action is Action: TYPE\nValue: pizza"}]}, {"id": "mind2web_4774", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_60166de7-b684-4fd1-a01e-814cabfb53ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.26, 0.35, 0.294] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_4775", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_60794dd3-fdc5-4c9a-9b1f-c84d44ea1544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: Boston\n[span] Boston -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.404, 0.64, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4776", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_dce74858-cfe5-48b1-92a4-6ed0b917dc77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.206, 0.681, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4777", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_276f37d7-195e-4140-8216-7cd9f629c82d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.473, 0.463, 0.495] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4778", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_7745dc56-1d07-4d83-b0b6-b196f26a0413.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Next -> CLICK\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.314, 0.309, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4779", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_f4fb861e-2b80-49a8-9c84-909518e0e7c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK\n[button] Substract one Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.143, 0.481, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4780", "image": {"bytes": "", "path": "./images/84d8a4df-0bba-45f9-b4c8-f5d455de451c_59cfb94c-6b8b-4897-a5b9-a0ec07c8afda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add one baby one piece to cart.\nPrevious actions:\n[link] BABY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.139, 0.351, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4781", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_13d2a2b2-7b05-4bd4-9a33-659aa7490405.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[checkbox] EV Charging (1) -> CLICK\n[button] Show 1 Results -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.301, 0.384, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_4782", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_d3001f53-d5a7-4ef6-b3f3-30fd3b8fdd2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: albany\n[span] Albany, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.166, 0.566, 0.188] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_4783", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_d183a68c-3454-480a-9d79-b2d033f7853d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Search jobs at CarMax -> CLICK\n[textbox] Search for Job title -> TYPE: Accounting\n[link] Accounting & Finance 7 jobs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.201, 0.331, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4784", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_70ab65e1-8095-4743-b42b-90879639ae57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK\n[button] Expand Upcoming -> CLICK\n[button] add to watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.245, 0.059, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4785", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_51b4faa6-0136-4f49-baad-9bd1bc178051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.274, 0.512, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4786", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_9fd3a843-608c-4ab6-9bd2-3adee3a14559.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK\n[combobox] Enter a location -> TYPE: south station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.204, 0.695, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4787", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_0b780e87-4aa8-4eaf-a19b-cb3457052141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK\n[button] BOOKMARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.469, 0.457, 0.531, 0.485] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4788", "image": {"bytes": "", "path": "./images/2d18cb36-5628-49e8-a336-c25c153c5527_94da50d6-e71f-4997-abeb-db862c325ecc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find John Atas's trip with the confirmation number 1000001\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.362, 0.17, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4789", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_c14edccf-de52-47b8-928c-f87f13139747.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\n[button] Camp & Hike -> HOVER\n[link] Women's -> CLICK\n[link] add filter: 6(220) -> CLICK\n[link] add filter: Waterproof(171) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.431, 0.204, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4790", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_89654b34-bfdf-4514-b1a1-1aa91462bf85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.204, 0.93, 0.227] in the image\nAnd my action is Action: SELECT\nValue: Low to High"}]}, {"id": "mind2web_4791", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_80087b51-ce0f-4a04-b7ef-512f6c67dfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[link] Fly to Miami (MIA) -> CLICK\n[button] New York/Newark (EWR) Miami (MIA) Roundtrip|Econom... 
-> CLICK\n[textbox] Departure -> CLICK\n[gridcell] Saturday, May 13, 2023 -> CLICK\n[gridcell] Thursday, May 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.486, 0.699, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4792", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a20c534d-630c-4fa9-94d6-ef298b8e67ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.381, 0.393, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4793", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_4326dff5-bcbc-46fd-a64f-37c77bc38404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[select] April 2023 -> SELECT: July 2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.305, 0.666, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4794", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_a2f646a1-bb6e-4bef-8e80-9f65de82161c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.326, 0.11, 0.34, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4795", "image": {"bytes": "", "path": "./images/03ac581c-ef95-4435-9d35-435e198b82de_ca7b5174-50e8-410c-882e-d33568d72b38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the Emergency Sickness Plan policy certificates for Connecticut.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Protections & Coverages -> CLICK\n[heading] Emergency Sickness Plan (ESP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.396, 0.748, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4796", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_5f017f7d-93a9-4835-b53f-c1af4eccc6e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.242, 0.783, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4797", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_61dd1ff8-f57e-465b-8b8f-90340bb4c4d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.163, 0.675, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4798", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_6ed5c79c-dd28-42e1-af23-8a7962616627.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: professional boxing\n[option] Professional Boxing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.126, 0.251, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4799", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_36b4afb0-08a0-487e-9118-53846861391d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\n[button] change store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.151, 0.593, 0.169] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_4800", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fadf5fda-b09f-4516-b1a7-9ec58dc23e1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] Training -> CLICK\n[div] Size -> CLICK\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.689, 0.233, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4801", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_9c2f6d1a-a5c7-4094-97ed-0dfaf2eff284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.162, 0.291, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4802", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_f48ac382-e936-49f1-944b-ed81052b0e12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK\n[textbox] search -> TYPE: Dota 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.085, 0.949, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4803", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_4aede9a4-0099-4d40-8b0d-4399bd3bd274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.484, 0.539, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4804", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_7864d3db-532e-478a-b365-5533d458f2d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.256, 0.618, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4805", "image": {"bytes": "", "path": "./images/d743815d-b7be-43c5-99b6-a224bd2f6a1e_36aefdb1-aee2-4743-a3b8-54eaf1a6beed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information on how to find lost AirPods.\nPrevious actions:\n[link] Support -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.327, 0.5, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4806", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_253077b6-b883-4263-808e-e8cd35f2b6b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[span] -> CLICK\n[button] Subscribe -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email Address -> TYPE: abc@abc.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.711, 0.354, 0.799, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4807", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_61738502-0b06-46b8-910b-266b5ccfbe97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Dubai\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.32, 0.107, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4808", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_d1079663-66c1-47c7-a0fa-6f0420a99469.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.246, 0.233, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4809", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_7949a928-22fa-4f0c-824a-d6b3ea062f7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\n[button] Browse -> CLICK\n[link] All Boardgames -> CLICK\n[link] Ark Nova -> CLICK\n[span] Ark Nova (English edition, third printing) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.814, 0.14, 0.97, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4810", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_d1d704cb-2130-4933-b894-6c0a492dc4c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.084, 0.324, 0.098] in the image\nAnd my action is Action: TYPE\nValue: chicago"}]}, {"id": "mind2web_4811", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_b9746274-9171-4823-b007-455876cc5a17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Search for events -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.403, 0.42, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4812", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_39a01968-5e21-459c-82ec-924e69ae3041.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.246, 0.5, 0.27] in the image\nAnd my action is Action: TYPE\nValue: YAW639"}]}, {"id": "mind2web_4813", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_7ae94499-171d-4bee-a8f8-12cd500daf3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.002, 0.348, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4814", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_90692452-f027-4608-a74d-8382631f665f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK\n[tab] Price (low to high) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.653, 0.84, 0.685] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4815", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_6219d481-0512-4dee-8054-a5a7b9fac49c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.573, 0.611, 0.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4816", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_51dfec56-5700-4f95-b3dd-34a07aae5856.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.43, 0.277, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4817", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_1b8d8699-8875-4d1e-a4b0-594a3f659771.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.855, 0.994, 0.982] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4818", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_6bd9289f-2ad1-42a9-81fc-f1719e3e9d89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.174, 0.264, 0.225, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4819", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_4cc72811-d9b9-4f6f-8ae1-5556a8f76045.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] Tablets -> CLICK\n[img] Samsung -> CLICK\n[span] 11\" & Larger -> CLICK\n[button] APPLY -> CLICK\n[span] 8.7\" -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.405, 0.192, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4820", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_77ae34f7-69b8-4da8-90c9-5420ce7b170c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK\n[img] SUV -> CLICK\n[div] $75 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.519, 0.345, 0.631, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4821", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2cc502de-3f64-4412-9dfa-d6311cbc490a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.149, 0.122, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4822", "image": {"bytes": "", "path": "./images/cd8f1f63-f6c3-4b1b-9ec9-3b13b9f0386c_af0979f8-d69c-48e8-a772-a069f0d24a84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Read the 1 star reviews of DayQuil Severe Cough Liquicaps.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.009, 0.804, 0.023] in the image\nAnd my action is Action: TYPE\nValue: dayquil"}]}, {"id": "mind2web_4823", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_b05415f3-ea39-499f-b8d0-25e061aa16e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.104, 0.463, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4824", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_1fb87a1c-d99f-48e1-ae71-ba6f01482933.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.016, 0.546, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4825", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_bdc9ce47-f8ec-422f-a746-44d33de2b5a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] Champions League -> CLICK\n[link] Manchester City -> CLICK\n[link] Stats -> CLICK\n[select] English FA Community Shield -> SELECT: UEFA Champions League\n[select] 2022-23 -> SELECT: 2022-23"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.253, 0.717, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4826", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_01ef4b13-5aec-4c24-9e21-67d5c3f3caeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.232, 0.249, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4827", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_4abb3e7b-7920-47cd-9268-2df8e7a4c4c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.142, 0.216, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4828", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_8b782e73-72f5-42e8-89e0-197104dfbedd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.078, 0.181, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4829", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_4db51223-64ed-46ee-aee9-c61490715f38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.176, 0.33, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4830", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_e26a7560-027d-4467-b206-33ac5f582855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.086, 0.492, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4831", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_20f7373e-4912-4000-aab2-2097e31b32e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. 
-> CLICK\n[textbox] Enter a Location -> TYPE: brooklyn\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.043, 0.206, 0.254, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4832", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_8b3ecf8e-9155-4d96-9c07-4068e5782c66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\n[textbox] search -> TYPE: Fallout 4\n[link] Fallout 4 $19.99 -> CLICK\n[select] 1900 -> SELECT: 1995\n[link] View Page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.716, 0.34, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4833", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_da0b4cb1-d93d-4810-a74b-cb1a47baded5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.286, 0.093, 0.39, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4834", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_f9650777-77a8-4cf0-9fc6-788cb349e8e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[label] Pick-up location -> TYPE: BANGKOK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.206, 0.438, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4835", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_74a7bdfb-8e01-47ae-8251-2c1ee845131e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.073, 0.613, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4836", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_9b472dc3-9d6c-466a-ac5b-3b787e64dbd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.409, 0.777, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4837", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_82d3b3a2-ab6a-4d43-be93-b933685cab2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.083, 0.237, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4838", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_aa3adabf-f4e4-4b54-ab6d-9e8fbf8b11e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.566, 0.558, 0.841, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4839", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_b14fb2e2-d8b2-41f2-9cf2-517a4a832935.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] -> CLICK\n[label] Most popular -> CLICK\n[span] See availability -> CLICK\n[button] Show more dates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.177, 0.801, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4840", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_f8261747-9bb4-4b98-98a2-f34f5eaba467.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[button] Get trip suggestions -> CLICK\n[button] Depart at 8:40 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.304, 0.246, 0.326] in the image\nAnd my action is Action: TYPE\nValue: 00"}]}, {"id": "mind2web_4841", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_1063a7d1-40b8-4b02-a6cb-f320875209b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.046, 0.722, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4842", "image": {"bytes": "", "path": "./images/f45b0783-4325-4b3e-959d-c181971d72f6_fbd5b363-535a-4675-9c4f-ce3b14af687e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest news and rumors about the NBA team the Los Angeles Lakers.\nPrevious actions:\n[link] NBA . -> CLICK\n[link] Teams -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.364, 0.498, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4843", "image": {"bytes": "", "path": "./images/16886ec7-3301-4103-b175-9fa817335984_672dc62a-a88d-468f-9b1e-eee5818cb7a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the highest average points scored in the current season\nPrevious actions:\n[button] NBA -> HOVER\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.036, 0.563, 0.062, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4844", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_3169cbf4-b8dd-4854-af0b-bad280e9950d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[link] Restaurants -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.166, 0.186, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4845", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_662d83be-f7cd-4480-ae69-20aeaf275ce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.291, 0.233, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4846", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_b7bcc68c-c9f7-4069-84c1-6c3fe43d24ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK\n[textbox] Zip code -> TYPE: 90026\n[button] Apply within filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.757, 0.027, 0.764] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4847", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_5e934cdf-7af9-40d2-a4f0-ada6b371432e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK\n[span] Any -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.565, 0.21, 0.728, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4848", "image": {"bytes": "", "path": "./images/e8637690-bb8c-4596-a608-5b40a29d77c9_4a582ba5-4347-4b8b-8e83-40d48174cd24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a large store in Washington that has kids' and maternity products, also check if they have a parking lot, and see the directions of the nearest store.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.721, 0.2, 0.736] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4849", "image": {"bytes": "", "path": "./images/e437082b-8383-4322-aa58-a6a683113970_01b4466a-8da6-4894-9b73-283acd8a8d89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Las Vegas that will cost two people the least for a week starting from 10, April\nPrevious actions:\n[button] Book -> CLICK\n[link] Hotels -> CLICK\n[searchbox] Type your destination -> TYPE: Las Vegas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.451, 0.409, 0.489] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4850", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_25614f00-cfb4-4d96-9189-80974032e6bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.095, 0.797, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4851", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_c75460c1-761c-4db0-ae39-226820fe160b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK\n[button] load Vitamins A-Z Menu -> CLICK\n[link] Vitamin C -> CLICK\n[div] Relevance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.479, 0.98, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4852", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_31f5929e-78b5-4fb3-9722-4fe9085bf63b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK\n[label] 32\" -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.257, 0.222, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4853", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_eac36fe2-54cf-4f5c-b064-426223357844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 0.055, 0.9, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4854", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_06967b32-70d8-492b-8521-dbfafd2504f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.112, 0.32, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4855", "image": {"bytes": "", "path": "./images/4b33554f-eb29-4f8d-93cf-5e947e4f8e47_708d6e85-370a-4de2-b507-35df29ee9a42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for restaurants near my location with pizza and for 6 customers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.029, 0.456, 0.065] in the image\nAnd my action is Action: TYPE\nValue: Pizza"}]}, {"id": "mind2web_4856", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_1874df15-0121-4c6c-9489-9ca06fbc20fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\n[link] REWARDS PROGRAM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.449, 0.219, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4857", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8e2b91b9-de9e-4e98-8b53-a53d238427cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.157, 0.478, 0.196] in the image\nAnd my action is Action: TYPE\nValue: Chennai"}]}, {"id": "mind2web_4858", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_08dce56f-9e31-44cc-b247-2ff269bbd19e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Sort: Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.27, 0.232, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4859", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_3bef15a6-9466-4ee9-b485-4f7dd16e1291.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] To , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.199, 0.492, 0.22] in the image\nAnd my action is Action: TYPE\nValue: CDG"}]}, {"id": "mind2web_4860", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_27fa3863-1da9-4a0e-849c-837c9f0abeaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.735, 0.225, 0.923, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4861", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_c99ec0f1-3a4a-4e38-bc04-4ecab395b872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.705, 0.659, 0.718] in the image\nAnd my action is Action: TYPE\nValue: Adams"}]}, {"id": "mind2web_4862", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_fad534dc-b5e4-4d2b-85ae-4b2f16b7dce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Leeds\n[span] Leeds -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.319, 0.326, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4863", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_6d93218f-905d-4f25-a513-a1e03c29eed5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.22, 0.729, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4864", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_bbad698b-ef52-4fda-85eb-5df447581cf7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[button] \ue9571 NIGHT Sat, Apr 08 - Sun, Apr 09 -> CLICK\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.229, 0.255, 0.359, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4865", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_96dbde61-5327-46c8-8e3d-d2380577f324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.627, 0.236, 0.649, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4866", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_1cf0e95f-e911-42e3-a18f-a2c4ae24e04b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[select] April 2023 -> SELECT: July 2023\n[link] 7 -> CLICK\n[link] 15 -> CLICK\n[combobox] Pick Up Time -> SELECT: 1:00 PM\n[combobox] Return Time -> SELECT: 2:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.225, 0.463, 0.256] in the image\nAnd my action is Action: SELECT\nValue: Vietnam"}]}, {"id": "mind2web_4867", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_45f50459-f551-4081-8177-ecefc94cae72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.547, 0.184, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4868", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_df45ef47-c2eb-4bb6-9b1d-aac6c641c2e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.592, 0.192, 0.61] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4869", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_8bc96816-a9be-4f98-b864-15092427a0b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.012, 0.384, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4870", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_a8c71db4-4c7d-45a0-befd-2bf4ea75e2fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.372, 0.447, 0.418] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4871", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_7128c85b-93dc-496e-9ffc-d8e624bf9036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] START FROM SCRATCH -> CLICK\n[h1] My New Playlist -> CLICK\n[h1] My New Playlist -> TYPE: Dirty\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.199, 0.699, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4872", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_f12321ef-5a11-4132-ab68-7660be13e08c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.037, 0.17, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4873", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_3badcc5d-9a3e-408d-92d6-46206293a333.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[link] Grocery -> CLICK\n[link] Bread -> CLICK\n[label] In Stock Today -> CLICK\n[checkbox] Tortillas (4) -> CLICK\n[button] Increase Quantity -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.969, 0.463, 0.99, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4874", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_ddcf9faa-d926-4c7b-bdc8-ac481e2daddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.0, 0.279, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4875", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_c01c84b0-81b3-4e6a-93f8-d1d319e101c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.064, 0.44, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4876", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_71a6b122-0c7f-49f6-8f00-496bc997c596.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK\n[span] Cultural Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.148, 0.741, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4877", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_88c6723c-7217-4add-893d-bf5d72b68db8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK\n[button] Price (lowest first) -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.595, 0.923, 0.61] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4878", "image": {"bytes": "", "path": "./images/fc21339a-5dc0-489c-b348-2fac79483f76_a726af10-e02c-4e08-846c-e5d79fc1f8cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Metformin 1000mg tablet price and details.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Amazon Health -> CLICK\n[link] Amazon Pharmacy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.425, 0.741, 0.455] in the image\nAnd my action is Action: TYPE\nValue: Metformin 1000mg"}]}, {"id": "mind2web_4879", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_acdbfdec-a930-4e6c-bbb3-2734441739b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK\n[searchbox] Refine Location -> TYPE: Boston\n[span] MA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.492, 0.151, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4880", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_233a182b-880c-4fc2-883d-b5f7db449fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10\n[listbox] minutes -> SELECT: 45\n[svg] -> CLICK\n[listbox] select adults -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.269, 0.391, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4881", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_eb050362-242d-4e15-bf1a-82c746f71bc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.519, 0.975, 0.56] in the image\nAnd my action is Action: TYPE\nValue: Tim Stebee"}]}, {"id": "mind2web_4882", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_ad3cdc43-66c2-4833-b233-5df774bfceab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.163, 0.5, 0.188] in the image\nAnd my action is Action: TYPE\nValue: Manhattan"}]}, {"id": "mind2web_4883", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_b68ec086-9eaf-4b7d-977a-b2a6a6417d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.675, 0.856, 0.687] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4884", "image": {"bytes": "", "path": "./images/8f41d9db-29fd-4c1b-9cbd-0f74fecdf520_05468c6a-dee3-4b1e-a923-9004409dc1ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show information about careers with MBTA and signup for customer service job alerts. Name: James Smith. Email: abc@abc.com\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.521, 0.823, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4885", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_12af0a4e-ac29-4730-9fef-81f52558f981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Reggae -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.262, 0.881, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4886", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_da39254d-694d-449a-9fd4-61d73f28d2a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.417, 0.093, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4887", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_0a60f460-55f6-4c2d-a535-36cdf07eeebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: New York City\n[option] New York City\u00a0\u00a0 City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.096, 0.628, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4888", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_e4951d09-66ec-4136-bac3-44a43647d534.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[button] Departments -> HOVER\n[button] Electrical -> HOVER\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.444, 0.098, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4889", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a559a3c5-70ba-425d-a2c7-ee28846020dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[button] Distance & Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.264, 0.237, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4890", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_41cf4d7a-1f81-42f6-8711-5c1a16ed9d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.224, 0.134, 0.277, 0.15] in the image\nAnd my action is Action: TYPE\nValue: 07055"}]}, {"id": "mind2web_4891", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_638d3c1a-7e71-476f-bd07-42cfcf96f211.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.765, 0.457, 0.78] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4892", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_178b5b80-98d2-4169-895c-8e4eada72f72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.248, 0.459, 0.278] in the image\nAnd my action is Action: TYPE\nValue: 15000"}]}, {"id": "mind2web_4893", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_98318d24-fc5b-4031-bff4-008759505c93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.307, 0.875, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4894", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_562c577d-ae8f-4c46-bb04-2877a53444f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.532, 0.138, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4895", "image": {"bytes": "", "path": "./images/c5cc7e71-6fd6-4a33-b450-89565dcff0a3_b1ca9ca5-f756-40f0-9e77-3ee9207a3e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Hotels in Mexico.\nPrevious actions:\n[textbox] What type of deals? -> TYPE: Hotels\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: Mexico"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.134, 0.536, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4896", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_2da8d261-db2a-478c-b02d-cd0694309653.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.221, 0.066, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4897", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_529bf27f-0365-49a0-a525-a223e2d1d091.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\n[link] Music -> CLICK\n[link] indie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.431, 0.127, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4898", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_2f0b502b-6280-4361-9d9b-77f44c23c9c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.579, 0.307, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4899", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_9a0c78ef-cc11-4975-8d79-e59d7a5e6d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.129, 0.266, 0.151] in the image\nAnd my action is Action: TYPE\nValue: SPRING, TX"}]}, {"id": "mind2web_4900", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_a0fd3657-be32-476b-8d86-b7dd38afd2a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.012, 1.0, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4901", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_69c07f95-bec3-47b2-964c-db723e729b84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] \u00a3 -> CLICK\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.346, 0.916, 0.393] in the image\nAnd my action is Action: TYPE\nValue: Bloom"}]}, {"id": "mind2web_4902", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_4d289945-1ccb-4aca-9ca5-00c19003c28b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[div] Spain -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.152, 0.442, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4903", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_4c159fd1-de41-432a-8c93-4ffef904d093.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... -> CLICK\n[textbox] Last name -> TYPE: sin\n[textbox] Trip Credit / Ticket number -> TYPE: 1234567"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.35, 0.874, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4904", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_548cec03-b81f-4bfd-8d26-a5bd57383fa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Departing April 11, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK\n[button] Jul 7, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.928, 0.43, 0.984, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4905", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_694d5209-df32-4dd2-a885-72b559a39cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.016, 0.418, 0.048] in the image\nAnd my action is Action: TYPE\nValue: motherboard"}]}, {"id": "mind2web_4906", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_d321135b-7227-4764-933b-d0ce804aa88a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[button] DONE -> CLICK\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... 
-> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK\n[link] $259 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.371, 0.902, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4907", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_394dcce1-2df9-4a3c-8088-31e132733f32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.141, 0.211, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4908", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_18379f86-26b5-4f32-8c38-cfd07d6f4ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK\n[button] Filters -> CLICK\n[textbox] max price $ -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.448, 0.786, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4909", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ccc8fff7-4673-4e12-b66d-87ebf14f3a02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.199, 0.927, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4910", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_14413e25-3474-43a7-88a4-8c6017dfefc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.302, 0.913, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4911", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_d3ce6c2b-fa4b-473f-9d10-2980148592c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK\n[span] 27 -> CLICK\n[textbox] Return Press DOWN ARROW key to select available da... 
-> CLICK\n[span] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.752, 0.213, 0.853, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4912", "image": {"bytes": "", "path": "./images/7cbd1771-ca62-47ed-97db-5577644d285c_445a3b2c-9bf3-48c9-bb84-97c89020d5e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my favorites a cruise for 4 people going to Caribbean.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.285, 0.285, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4913", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_0d7f064a-aa60-43bf-a75a-a1e7ff4351ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.81, 0.316, 0.86, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4914", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_f9801b57-8f15-4dab-9e72-aa767e19f1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Price Low-High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.583, 0.138, 0.592] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4915", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_7b90ff88-5507-4a4a-8c8b-52d348e46ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] Pop Rock -> CLICK\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.233, 0.163, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4916", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_2fda5161-1368-4436-8d1f-fc75151db6ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK\n[button] Apply promo code -> CLICK\n[textbox] Apply promo code -> TYPE: 1000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.205, 0.953, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4917", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_9e2ad598-a31c-48d7-809a-7482f0e22074.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK\n[button] 6 - 9 Days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.331, 0.871, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4918", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_4c7c0d34-e5bd-4c51-b699-e4ff6f392fce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.045, 0.219, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4919", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_645e7a33-cc4e-47ee-bbe7-06941488d9f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK\n[textbox] Zip Code: 43085 -> TYPE: 26807"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.584, 0.206, 0.701, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4920", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_f463f8e4-acbf-45ce-b77a-59e6eadc7213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[span] Mansions -> CLICK\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.159, 0.598, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4921", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_4c1a9de4-e5d4-4be6-80e0-1e60fcfb3386.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK\n[button] 6 - 9 Days -> CLICK\n[button] SEARCH CRUISES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.083, 0.212, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4922", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_f02d5411-dd85-430b-a6fe-47ea3fc45474.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] Depart date please enter date in the format dd spa... 
-> CLICK\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.629, 0.927, 0.656] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4923", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_557a0c86-28aa-4838-b6d5-84c2383074df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.736, 0.296, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4924", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_95141533-9d98-44d8-892a-27fafb078c64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.147, 0.392, 0.162] in the image\nAnd my action is Action: TYPE\nValue: belo horizonte"}]}, {"id": "mind2web_4925", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_77c7b44a-4641-49f4-8c49-b7268e7e1c6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 10 -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[span] 0 -> TYPE: 7"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.437, 0.908, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4926", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_56a0811a-d418-4d40-a9c6-3db908dfbfe7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.297, 0.743, 0.449, 0.784] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4927", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_d36af357-3992-4fdc-af97-755183ecfd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] European trains -> CLICK\n[menuitem] Spain train tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.702, 0.226, 0.794, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4928", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_bc05be9f-e46b-4654-83c0-862c601f263f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.191, 0.782, 0.227] in the image\nAnd my action is Action: TYPE\nValue: New york knicks"}]}, {"id": "mind2web_4929", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_f69f089a-e5a4-4995-9df6-4b564436b806.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.762, 0.027, 0.777] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4930", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_9e20b613-6f39-45e4-b248-a3383bb160bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? -> TYPE: NEW DELHI\n[b] DEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.262, 0.906, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4931", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_b512eb3a-d22d-4b97-9602-8accf6088ddc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] Fresno -> CLICK\n[img] -> CLICK\n[span] Order Online -> CLICK\n[link] All -> CLICK\n[label] Japanese Restaurants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.441, 0.559, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4932", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_7e75bead-d0bb-4243-ab8b-7c062cd37053.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK\n[option] Price -> CLICK\n[link] VIEW RATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.601, 0.943, 0.633] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4933", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_eb660037-cbb5-4b1a-be71-d1b5ad6fd160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK\n[span] New -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.524, 0.158, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4934", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_916bd8e4-c9a3-4837-8144-c9fc5cbdcf41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\n[span] Top activities -> HOVER\n[span] Colosseum -> CLICK\n[link] Tours & Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.144, 0.143, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4935", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_14d0b2bf-2ab6-4ab6-abad-772760082d0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.213, 0.077, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4936", "image": {"bytes": "", "path": "./images/ccf98191-100e-441a-93e8-8ff4076aeaa0_e3cf226c-7d99-41ea-89d5-a56659d29b84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my cart a women's T-shirt priced under 10 dollars\nPrevious actions:\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK\n[heading] Price -> CLICK\n[label] $0-$10 -> CLICK\n[polygon] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.293, 0.256, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4937", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_8782a791-9cd8-4ff2-be7e-865859dd7fc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK\n[link] Medical -> CLICK\n[link] Diseases & Disorders -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.382, 0.366, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4938", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_4762d735-9dc2-4717-ae8b-baab0b3446e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.282, 0.143, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4939", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_f5a28d3e-5195-4f57-9cb0-b69fc4e39b1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] March 19, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.12, 0.213, 0.279, 0.235] in the image\nAnd my action is Action: TYPE\nValue: 3"}]}, {"id": "mind2web_4940", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_20dce0e2-8e16-4412-aa55-23f7a1d13681.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.139, 0.42, 0.181] in the image\nAnd my action is Action: TYPE\nValue: Ohio"}]}, {"id": "mind2web_4941", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40137cca-0d78-4d63-9635-8352aa17f0c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.13, 0.625, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4942", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_2e3da2eb-62df-434d-b787-bbb106ebfb4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.439, 0.32, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4943", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_7dcf08f4-e760-4be7-be8c-0a533074883e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. 
Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee\n[textbox] Recipient Email -> TYPE: scisoorbros@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.683, 0.975, 0.729] in the image\nAnd my action is Action: TYPE\nValue: Jeerimiah Waton"}]}, {"id": "mind2web_4944", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_763f2654-e332-4ba0-b78b-81110a53ff6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: GOA\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.378, 0.263, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4945", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_906c8603-da25-403f-b16b-7258c1f73735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.101, 0.161, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4946", "image": {"bytes": "", "path": "./images/9ea9ac0d-8c31-49bc-aa3d-e85b80580409_8368fa5f-1af2-4abb-bcf6-cf089a8ca346.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight schedule between London and New York for April 7 and check the upgrade details of the noon flight.\nPrevious actions:\n[heading] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.222, 0.272, 0.255] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_4947", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9b195a60-73db-47f5-a2f0-d5a47fbdeb06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001\n[combobox] State -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.445, 0.609, 0.484] in the image\nAnd my action is Action: TYPE\nValue: 1234"}]}, {"id": "mind2web_4948", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_57f7d43c-5d53-4a00-8fa7-5feafe218409.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[tab] 7 -> CLICK\n[path] -> CLICK\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.402, 0.226, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4949", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e4610afd-1311-4ec1-97ee-3ecf4c573381.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Jazz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.599, 0.093, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4950", "image": {"bytes": "", "path": "./images/4af615be-4472-4357-8737-5286b06b385d_e8bc0cef-e7f2-447c-8393-356a10b812b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used 2006 Honda Civic.\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2006\n[combobox] Make -> SELECT: Honda\n[combobox] Model -> SELECT: Civic\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.264, 0.981, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4951", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_72ec56be-d53d-4b32-acb8-ac991e6ca999.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. -> CLICK\n[searchbox] From -> TYPE: grand central"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.347, 0.474, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4952", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_fd1d5d23-7a0f-4576-be97-833ebf8126bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.06, 0.181, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4953", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_43f63fb5-a96b-4da5-a251-cf7829d4501f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\n[link] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.232, 0.249, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4954", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_94bad45f-8cab-4d3f-9c10-ede8de8da2a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Alinea\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.88, 0.448, 0.908, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4955", "image": {"bytes": "", "path": "./images/92cfe78f-0385-4ef6-b829-ae34291e766f_93138842-6d51-48af-aa67-d6214bc11bfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an LED TV below $1000.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] TV & Home Theater \uf105 -> CLICK\n[div] TV & Accessories -> CLICK\n[link] LED TV -> CLICK\n[dt] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.672, 0.192, 0.687] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_4956", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_26337816-766e-4897-b7df-e4d62ea83cda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.109, 0.292, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4957", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ffc2d7ad-0691-466b-b825-956744be5a2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[button] Increment -> CLICK\n[path] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.218, 0.573, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4958", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_4ed75775-bdd0-455f-abf7-f105531035b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Car Sales -> CLICK\n[combobox] All Makes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.372, 0.472, 0.383] in the image\nAnd my action is Action: TYPE\nValue: 26807"}]}, {"id": "mind2web_4959", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_796c803d-8f81-4cfe-a335-d7313478fdb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city\n[span] Nevada City -> CLICK\n[span] 4831 Granite Dr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.205, 0.691, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4960", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_2190574e-5045-4a7c-aeab-5f9d88e544cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.701, 0.882, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4961", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_2fd38569-ac81-4db5-8534-ac8b52302caf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.469, 0.388, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4962", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_7e00abe3-6fa7-4b74-b2be-7505a7270e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.139, 0.367, 0.238, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4963", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_054fab53-5095-4ef9-a358-ccfae23ddabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.237, 0.239, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4964", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_e7bb4a75-73a7-4320-95ec-03516f734caa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.183, 0.234, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4965", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_78b56f22-a09c-4cbf-8e7e-fe5dd97a1305.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\n[searchbox] Search Site -> TYPE: lenovo laptop\n[button] \uf002 -> CLICK\n[div] Newest Lenovo Ideapad 3i Laptop, 14\" FHD Display, ... -> CLICK\n[div] Price Alert -> CLICK\n[textbox] price from -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.292, 0.727, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4966", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_b0204daf-d53c-416b-bbf3-fe924f4d9d25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.088, 0.485, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4967", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_95ec1d75-39b4-41c3-bdd2-fd4404dbe49f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.697, 0.059, 0.722, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4968", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_4c9dfd61-fbf4-424e-a505-a20212944a9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.287, 0.539, 0.308] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4969", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_a2e3ce74-6960-44a8-9352-ce292abb6b25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[heading] Barboursville -> CLICK\n[link] More info about Barboursville store -> CLICK\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.118, 0.819, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4970", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_02461d6b-dd73-4855-9d43-5545b559e29c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[tab] Vehicle Price -> CLICK\n[textbox] Monthly Payment -> TYPE: 250"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.117, 0.459, 0.14] in the image\nAnd my action is Action: TYPE\nValue: 3000"}]}, {"id": "mind2web_4971", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_5fdc4213-5ec5-4e87-9984-4b602c1a2368.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.072, 0.897, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4972", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_5e40a7b5-18ac-44c8-959a-a530a564942a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.111, 0.594, 0.123] in the image\nAnd my action is Action: TYPE\nValue: Titanic"}]}, {"id": "mind2web_4973", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_cffee1ce-d1d4-44c7-8978-a4a91b399818.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.383, 0.196, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4974", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_905be250-0f48-4c69-a6c0-82997c490294.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.062, 0.819, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4975", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_1ef89271-0828-431c-8ad2-83ba8b886666.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.054, 0.491, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4976", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_490963a7-541a-4739-836f-b305f77e41ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK\n[gridcell] Size -> CLICK\n[label] S -> CLICK\n[gridcell] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.465, 0.139, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4977", "image": {"bytes": "", "path": "./images/6d963cc0-90d3-4908-bee4-29a8530536af_604638dc-e4b7-4183-9b31-ea41921cdd3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all my offers for 2-5 day cruises\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.609, 0.779, 0.645] in the image\nAnd my action is Action: SELECT\nValue: 2 - 5 Days"}]}, {"id": "mind2web_4978", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_865bb951-c7cd-439d-8d45-50b44019491b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.031, 0.176, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4979", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_068d6834-e98b-463c-a33b-df3480f7731d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog bed 30 inches\n[button] Go -> CLICK\n[RootWebArea] Amazon.com : dog bed 30 inches -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.79, 0.032, 0.796] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4980", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_47173eeb-574c-43c5-a937-a7da3b445094.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: TYPE\nValue: 10000001"}]}, {"id": "mind2web_4981", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_07cf23ef-c4c8-4f6e-8ea7-acd2ea457987.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK\n[heading] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.311, 0.471, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4982", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_97773a8e-0f6a-46e6-b900-726dc84c0b7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.283, 0.292, 0.319] in the image\nAnd my action is Action: SELECT\nValue: Wineries"}]}, {"id": "mind2web_4983", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_59161f2b-4e6d-4fb2-be23-4c27eeedefce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[link] Car Values -> HOVER\n[link] My Car's Value -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.23, 0.112, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4984", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_b76a522b-8917-4f56-a7bd-f0ff4fa2cabf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK\n[img] CVS Health Vitamin C Tablets 250mg, 100CT -> CLICK\n[button] Add CVS Health Vitamin C Tablets 250mg, 100CT to B... 
-> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.081, 0.186, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4985", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_9a0801ff-e639-4dd4-98a9-fa35268526a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.342, 0.143, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4986", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_cb45e6f4-73da-49fe-85ca-746a2424c6c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.006, 0.995, 0.025] in the image\nAnd my action is Action: TYPE\nValue: frosthaven"}]}, {"id": "mind2web_4987", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_b37d0499-8f2c-42f3-98a1-93d81e2cae6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.49, 0.027, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4988", "image": {"bytes": "", "path": "./images/31a74ae0-4b8a-407b-8f3d-a094a6966254_38ab39b0-d855-4990-91e4-801450b4c9ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show books of Dr Seuss with price range of $20 to $40 and add 2 to basket.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Dr. Seuss -> CLICK\n[select] All -> SELECT: US$20 to US$40"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.384, 0.196, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4989", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_0f8f7d21-0a40-44f8-8683-ca8d046e3e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[searchbox] To -> TYPE: Grand Central, NY\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. 
-> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.12, 0.609, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4990", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_8f341320-6179-4cb0-b145-5d5b9d59e8a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? -> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.556, 0.767, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4991", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_79f76036-6a56-4ff6-8f25-49bda6beaa0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.575, 0.38, 0.584, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4992", "image": {"bytes": "", "path": "./images/00cef5a2-4d07-4dd9-837d-5b85ac3a63d8_04943aa9-b541-411a-8ed3-e4c259733e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Chicago Bulls team schedule for the month of April.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.031, 0.37, 0.041] in the image\nAnd my action is Action: TYPE\nValue: Chicago Bulls"}]}, {"id": "mind2web_4993", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_270f2f26-4be9-4b51-8347-ec9ed1712b35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty\n[textbox] * Required Fields Last Name * Required Fields Las... -> TYPE: Lue\n[textbox] * Required Fields House Number -> TYPE: 4847"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.23, 0.476, 0.247] in the image\nAnd my action is Action: TYPE\nValue: 10019"}]}, {"id": "mind2web_4994", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_81adcddc-419d-4f81-b70c-348be8137bae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] \ue602 All Filters -> CLICK\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.13, 0.853, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4995", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_019508db-f61f-4343-93fe-7df53859be47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.099, 0.668, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4996", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_61cb1a42-d50a-4a13-a642-4519069dae8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK\n[link] hip-hop -> CLICK\n[gridcell] Clint Eastwood -> CLICK\n[button] More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.704, 0.724, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4997", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_f1dbd69a-ff2d-4ee1-8fd8-6773599ab87b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.104, 0.109, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_4998", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_578dd203-e4de-4ea6-bb5a-d65d7c71e63d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.168, 0.481, 0.192] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_4999", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_98bafeb7-8d43-4fe8-bdd5-a3b1aaf920d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK\n[link] 9 - apply US Shoe Size filter -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 100\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.088, 0.906, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5000", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_3b91e850-1a70-4008-9e51-f0c6dbdf6a74.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.128, 0.765, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5001", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_bd621503-fa0c-4902-80aa-28bca4aaa791.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.7, 0.278, 0.731, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5002", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_b4ba250f-9281-419f-8443-0ae4a34417ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.186, 0.359, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5003", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_5afa6856-13c2-405b-b830-a5ca14fe587f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.325, 0.253, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5004", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_983e2ebf-fc39-4f67-9991-ae7008c8a9e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.464, 0.333, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5005", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_8c8991bb-aa38-4939-b1bb-0b4b358b991d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[searchbox] City -> TYPE: fre\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.841, 0.281, 0.972, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5006", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4359bad3-5e98-42a3-8ae2-157730acf87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Free Dealer Price Quote -> CLICK\n[select] Make -> SELECT: Kia\n[select] Model -> SELECT: Carnival\n[textbox] ZIP -> TYPE: 11101\n[button] Find Dealers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.202, 0.928, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5007", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_18f5cbed-b040-4dcb-a90f-5aecfd8e43fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.279, 0.969, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5008", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_57f1736e-adf3-46e0-bc47-5cb8910dd878.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK\n[div] Learn More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.279, 0.341, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5009", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_1ec53584-9015-42d8-b9ef-b956a061181e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.626, 0.843, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5010", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_3eb24abe-68ed-45f0-b53f-9873bc0d09f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[button] Shop -> CLICK\n[button] load Vitamins Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.253, 0.574, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5011", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e4249fd9-eaf4-4209-a6fc-81cd2b3267ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.217, 0.167, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5012", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_6547e39b-5ccc-4df5-8668-44a769d70fd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.263, 0.803, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5013", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_e62a0a6c-d17d-4675-9dcf-80b0aebd0e3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.735, 0.846, 0.785] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5014", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_fabde200-2878-4346-ba10-23b269a827fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Check the status of a refund -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.255, 0.488, 0.301] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_5015", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_2bc91564-8a2d-4caa-968e-f6d6713349e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK\n[div] Product Category -> CLICK\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.267, 0.452, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5016", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_a75f21c5-093f-435c-ac68-cfeef7b29ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.453, 0.686, 0.496] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5017", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cb92fa99-6e9b-4b9e-983c-a85d76580669.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.758, 0.178, 0.981, 0.21] in the image\nAnd my action is Action: SELECT\nValue: Price Low to High"}]}, {"id": "mind2web_5018", "image": {"bytes": "", "path": "./images/9326b908-cbe6-41f6-957f-00b84c26bfcc_40db3113-9b8c-433f-a36e-b2bce9ea6527.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find recently added guitar pro non-acoustic tab intros\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Non-acoustic 1,805,658 -> CLICK\n[link] Intro 65,171 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.28, 0.305, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5019", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_73327f56-01db-46fd-b7d5-b3c3d84a563d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK\n[combobox] Guests -> SELECT: 3 Guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.199, 0.228, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5020", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_06ae5b02-03a9-45f9-a324-e9961b31c3e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.244, 0.277, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5021", "image": {"bytes": "", "path": "./images/22509b64-b643-44ec-b486-9828e686303c_a0b11591-7d3a-41ce-a01b-fb76318531da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the full menu for AMC Dine-In\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.038, 0.524, 0.09] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5022", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_a3dff442-0bde-420e-8a86-013bc958198a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.313, 0.837, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5023", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_2d7c0f04-8f46-4ca8-bd6d-950c31e920f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.12, 0.902, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5024", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_287f3852-35ea-4874-8d4f-64e2292bc1f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK\n[menuitem] Free to home or store -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.948, 0.08, 1.002, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5025", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_3ee8c662-4498-4f40-8eff-7320a2470dd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.104, 0.342, 0.132] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5026", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_8800c1bb-c7a7-4b80-8edc-13b2fa4a5c29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.197, 0.866, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5027", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_2533c6aa-8fbe-4a89-8047-a7346e530fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine\n[button] Search for cough medicine -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.466, 0.143, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5028", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_d99b4983-2ce2-4e02-b12d-1d5f4ead49ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[link] Shop -> CLICK\n[img] Sports car icon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.151, 0.249, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5029", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_7ed0f607-4961-4ea1-b6c9-7ca428f4f9d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.221, 0.326, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5030", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_4e9abe92-bb44-4c26-b5b2-c782737e121d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK\n[checkbox] Multi-Purpose Tool (11) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.504, 0.087, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5031", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_19b2c816-4c83-48d8-877e-71017a0fc5d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[link] Used Gear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.062, 0.184, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5032", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_94c51afd-2b7a-47e3-b33e-711ca7f9cd4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.733, 0.284, 0.767] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5033", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_dbbb21ad-4cd5-4166-a7f6-121ce7bf34e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 
1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.376, 0.368, 0.419] in the image\nAnd my action is Action: TYPE\nValue: 1234567890123"}]}, {"id": "mind2web_5034", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_788721c8-ba93-44be-af15-056a8fd86356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.199, 0.808, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5035", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_ae0eb36b-220a-432b-99da-eb328e43f411.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.122, 0.894, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5036", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_946345bf-762a-48d7-99fd-8ff65665c304.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK\n[textbox] Shipping Address -> TYPE: 7528 East Mechanic Ave. Fargo, ND 58102\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.529, 0.892, 0.553] in the image\nAnd my action is Action: SELECT\nValue: Two-Story"}]}, {"id": "mind2web_5037", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_52f50c15-5013-43bc-b055-f287c38e0d96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.304, 0.969, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5038", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_a1a020e3-7bbc-464e-8ea3-cdff088f36db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] List Explorer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.069, 0.89, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5039", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_46b11c44-c75d-44bf-8f50-74ef5dc7513f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK\n[button] Make -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.125, 0.045, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5040", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_005d0877-8b37-4969-b673-51a0e9ff85ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distance and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.184, 0.405, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5041", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_a25fd485-84b7-4b83-ab29-4bb5a58ec54c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.119, 0.969, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5042", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_8abefbf4-3265-488e-921c-d391ae6096c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.641, 0.945, 0.7] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5043", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_3756ef1a-2931-4990-a272-b1bf2b76a68c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[link] SEARCH CARS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.188, 0.249, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5044", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_22a19b2e-f799-4a08-bd41-16a246e36019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER\n[link] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.214, 0.606, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5045", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_9e7c3106-335f-4ee7-8dc2-9638fbeb8f6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fare calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.217, 0.464, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5046", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_6b8873a4-d11a-4e18-ba66-664584b4be5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.375, 0.274, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5047", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_038530a6-b2a4-4695-8a15-81312f121013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.206, 0.434, 0.237] in the image\nAnd my action is Action: TYPE\nValue: Lave Hot Springs East KOA"}]}, {"id": "mind2web_5048", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_bf997ba6-3daf-48f3-9fdf-0beef8edc37a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\n[span] Menu -> CLICK\n[span] Agencies -> CLICK\n[link] Bridges & Tunnels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.446, 0.5, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5049", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_4e232a35-ea10-415c-8e5f-c9ac22cd0350.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK\n[textbox] City -> TYPE: Grand Junction\n[combobox] State -> SELECT: Colorado"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.802, 0.398, 0.903, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5050", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_e0785f0c-5b78-480e-96b3-ef282ad0f38e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK\n[link] Food & Drink -> CLICK\n[link] Coffee & Tea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.468, 0.121, 0.483] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5051", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_04db667a-0b22-4a1a-a420-f17742d94391.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK\n[button] Select Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.37, 0.551, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5052", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_ab382337-5bf6-47b5-a717-1589609ab85f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.037, 0.181, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5053", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_ef3c3757-9751-4d0e-a336-271b2e09c353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.349, 0.452, 0.368] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_5054", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_eec4751e-9ac2-4842-98ac-2edd26e0d41f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York\n[a] NYC - New York, NY -> CLICK\n[combobox] Date -> SELECT: Friday, April 7\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.308, 0.847, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5055", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_54a94b9b-4c79-49ef-a0db-d62109ac4ff6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Supported Devices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.171, 0.59, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5056", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_f491a74d-ea2a-4ce7-b73c-a8493517b790.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco\n[span] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.264, 0.165, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5057", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_80c2d342-8948-49b9-b18b-846b6b5dd105.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.151, 0.129, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5058", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_4b35d4cd-0a74-4c3e-82a9-a1804592ae3d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK\n[link] F1 -> CLICK\n[span] The Gab & Juls Show -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.472, 0.36, 0.528, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5059", "image": {"bytes": "", "path": "./images/3596733f-6427-4ab5-9ff1-c65baaf524f1_8f824ede-447c-4c6c-b620-18425d58bbe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the list of restaurants with reservations available for 04/11 with vegetarian food.\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: vegetarian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.101, 0.273, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5060", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_229bf1f4-803a-4d87-9a8b-1715ae4dd3a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.756, 0.263, 0.765] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5061", "image": {"bytes": "", "path": "./images/bb31a9f4-3465-4a91-aae6-bf70aa6b729f_ebeceea9-c367-4eb8-97aa-b96615e4671e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a podcast about F1\nPrevious actions:\n[link] \ue028 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.416, 0.111, 0.573, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5062", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_f59ec5b0-8588-44d1-b254-2a83421b4b23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.707, 0.095, 0.721, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5063", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_ff696e9f-af71-48b7-a4cb-fda242e97114.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.311, 0.785, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5064", "image": {"bytes": "", "path": "./images/01dcf2f1-0b80-4819-a4d1-5063062d1aa5_2aaf6417-c1df-4b09-9d93-18d067f6930b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Romantic Audio fiction books with the lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.174, 0.331, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5065", "image": {"bytes": "", "path": "./images/7bdebbfa-2512-48e2-93b1-d92e2c87c6e1_1e192c02-4f8c-4ad8-b6de-6efa760df8bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vintage clothing and sort the results by price from high to low.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.033, 0.652, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5066", "image": {"bytes": "", "path": "./images/c14078dd-a4be-4784-a46b-cb01333e3019_99951f39-43d4-41a0-aef5-e95a0a34b32f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two $50 playstation store gift cards to cart\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Gift Cards -> CLICK\n[img] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.291, 0.975, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5067", "image": {"bytes": "", "path": "./images/d3ca5294-89aa-4028-8776-be08edc63783_e491ad98-a4ad-48c3-aadc-ace9647b8eb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a campground in Phoenix with wi-fi to check in today.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Phoenix\n[textbox] CHECK IN -> CLICK\n[link] 9 -> CLICK\n[link] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.269, 0.465, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5068", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_ae1ce8a0-f74c-454f-b0ee-ad9054d61a1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.373, 0.605, 0.627, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5069", "image": {"bytes": "", "path": "./images/d7ab9bf0-99e2-4f32-92c9-c1a10bc68f82_269e9a3c-8bf4-4d0e-81f0-3a2488d00298.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check Manchester City Performance Stats of 2022/2023 season in the champions league\nPrevious actions:\n[link] ESPN Soccer Home Page -> HOVER\n[link] Champions League -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.755, 0.363, 0.822, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5070", "image": {"bytes": "", "path": "./images/f86b0a14-ed91-491d-85be-b0af0b849353_b3025bc4-bb1f-4587-b640-2f7606f07007.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movies in theaters that are certified fresh and sort by highest audience score.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.122, 0.587, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5071", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_df4d6445-8f4b-4462-8798-32c0b9d0aaea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.387, 0.824, 0.42] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5072", "image": {"bytes": "", "path": "./images/effb9df8-3b3f-4349-8033-f79ba1587a4d_b9aabd63-0aa0-4871-b683-29daf286e242.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a projector which accepts crypto currency as payment.\nPrevious actions:\n[searchbox] Search Site -> TYPE: projectors\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.294, 0.192, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5073", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_48cd4b76-4638-41bb-8ee1-04df8cbba952.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] Certified Cars -> CLICK\n[combobox] makes -> SELECT: Jeep\n[combobox] models -> SELECT: Compass\n[textbox] Zip* -> TYPE: 59316"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.245, 0.407, 0.267] in the image\nAnd my action is Action: SELECT\nValue: 500 mi"}]}, {"id": "mind2web_5074", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_7d95104b-2cee-40c4-b43f-914ab0c77461.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shoes -> CLICK\n[link] Running -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.409, 0.233, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5075", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_84c30b1d-9a13-4f5a-9afc-716c157523b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] 1 room, 2 travelers -> CLICK\n[img] Decrease adults in room 1 -> CLICK\n[button] Search -> CLICK\n[checkbox] Hotel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.58, 0.22, 0.612] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5076", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_99c566eb-d382-4848-8302-73ac22a42e9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.198, 0.153, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5077", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_67c7cc50-4331-4340-93d5-90fa2a691741.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.317, 0.059, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5078", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_ca98286b-ed03-4f7b-be6c-f1da235ef72c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[link] Jaguar XF -> CLICK\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.443, 0.63, 0.493] in the image\nAnd my action is Action: TYPE\nValue: Jaguar"}]}, {"id": "mind2web_5079", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_59129ef1-cf47-4b83-b2f6-89a4f17166c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[button] Topic -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.265, 0.154, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5080", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_70e105e0-2679-445b-990c-4c167caaa6cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.256, 0.384, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5081", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_42393190-d323-4591-a206-ae9287b98ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[menuitem] category pillows -> CLICK\n[span] Pillow Protector -> CLICK\n[combobox] Select a Size -> SELECT: Queen\n[spinbutton] Main item quantity -> TYPE: 2\n[button] ADD TO BAG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.302, 0.736, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5082", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_356098a8-e05f-4dd3-abf5-7740e225140e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: radio city music hall\n[em] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.129, 0.336, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5083", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_0dcb9111-8f0c-47a9-a1ab-f8d5b5043ae5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK\n[button] Table of Contents -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.125, 0.216, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5084", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_92ca37c7-e58f-496a-ada7-4d5c078c20d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: shirt\n[textbox] Search by keyword or web id -> ENTER\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.208, 0.974, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5085", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_80fe87ad-ddcd-4427-9c67-11e293082f8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.046, 0.443, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5086", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_cfaa3b20-c491-40d0-ad34-ebcf44393172.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK\n[button] Open Guide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.226, 0.047, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5087", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_7157211f-9282-4318-8f16-d51a815e9e8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.202, 0.807, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5088", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_fcedbc87-f73f-4420-994e-2977bd3bab6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.06, 0.398, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5089", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_fd2ab52b-93bd-48aa-8b72-5f8a0835e72c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.167, 0.953, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5090", "image": {"bytes": "", "path": "./images/1ec300ff-d45f-495f-8fbe-36802fdd8c57_a15a2c86-9065-4217-990d-60b0a09cf1a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out about planned service changes on the 4 Subway on 5/3/2023.\nPrevious actions:\n[svg] -> CLICK\n[link] Planned Service Changes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.205, 0.391, 0.233] in the image\nAnd my action is Action: TYPE\nValue: 4"}]}, {"id": "mind2web_5091", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_7afa03ca-d746-4e09-aa3d-3b2b4d0805e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.172, 0.73, 0.198] in the image\nAnd my action is Action: TYPE\nValue: New York University"}]}, {"id": "mind2web_5092", "image": {"bytes": "", "path": "./images/8ab30c9e-f768-46e8-8174-83651d361b9e_509995b9-2de6-4613-94b8-4da7b566ba2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the scores of today's NBA games.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.127, 0.592, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5093", "image": {"bytes": "", "path": "./images/1c1af35c-a0d7-4fe8-bcd3-baf30862235b_a45c4879-9bb8-4c59-b2af-754d7f00f809.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most expensive cruise deals in Europe and Mediterranean.\nPrevious actions:\n[link] Cruises -> CLICK\n[link] Europe & Mediterranean Cruises 16 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.469, 0.772, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5094", "image": {"bytes": "", "path": "./images/bb7c3e38-dd9c-4472-9774-0012586a1dcd_20dd0a1b-ca66-4ea6-973b-3ce45a7fff77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show weekly deals of Columbus, Oh 43215 store.\nPrevious actions:\n[link] Locations -> CLICK\n[combobox] Search by ZIP code, city, or state -> TYPE: 43215"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.177, 0.287, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5095", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_aade8bc2-34c6-4374-8812-2fa5b8bba84a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.018, 0.577, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5096", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_c95ca89d-2256-4891-bbe7-98503507593e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[button] Move forward to switch to the next month. -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[button] Wednesday, August 2, 2023 -> CLICK\n[button] Monday, August 7, 2023 -> CLICK\n[button] 1 Adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.182, 0.481, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5097", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_c006a047-8042-442d-ae2c-a608af6664b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 958,222 United Kingdom -> CLICK\n[link] 246,685 CD -> CLICK\n[link] 122,512 Album -> CLICK\n[link] Show more\u2026 -> CLICK\n[link] 1,342 2016 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.345, 0.163, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5098", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_9a4627a2-fa39-4c85-b295-a6ebe37f5a95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[combobox] From -> TYPE: Abbotsford"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.273, 0.363, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5099", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_b8bd887f-f0a3-4977-9bbd-8ab6d095f115.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] SEARCH -> CLICK\n[span] View -> CLICK\n[link] I don't have the password -> CLICK\n[textbox] Your Name -> TYPE: Michael Cahill\n[textbox] Your Email -> TYPE: cahillm@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.4, 0.58, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5100", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_6de1827e-c854-41bf-86ca-4ebe2a33339c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK\n[link] $25 to $50 (2,237) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.132, 0.986, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5101", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_a18e6348-cedb-4bbb-9fb5-a4a982378a3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.049, 0.643, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5102", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_76bc3358-5f1f-416e-acf7-b934c7231a1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[div] Send a physical card through the mail. 
-> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK\n[textbox] To: -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.191, 0.99, 0.203] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_5103", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_85c9acad-b16f-4c31-bc8b-86e56639c5e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[div] Madurai, Tamil Nadu, India -> CLICK\n[path] -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[label] Air India -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.251, 0.074, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5104", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_e4018dc3-e21a-46c7-b1c3-4add061eb3ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.026, 0.151, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5105", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_c4d4d763-c6fa-47ba-8efb-eb4fb52f41dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.54, 0.95, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5106", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_68e483d8-9bfd-4c8c-9327-82577a11be18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.0, 0.445, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5107", "image": {"bytes": "", "path": "./images/7a632871-a6a8-40fd-b48f-6fea49cf48da_3f3803f1-9aa7-4da7-807e-d31136723db3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the NBA score board on March 11th\nPrevious actions:\n[link] NBA -> CLICK\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.176, 0.717, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5108", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_50f2d198-a0f4-4230-9b83-ed557dc56d79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[svg] -> CLICK\n[heading] Wireless Bra (Ultra Relax) -> CLICK\n[checkbox] BROWN -> CLICK\n[button] ADD TO CART -> CLICK\n[button] CONTINUE SHOPPING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.138, 0.843, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5109", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_fef9b6f7-061e-4b76-b684-5505c9d7eb70.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.304, 0.195, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5110", "image": {"bytes": "", "path": "./images/f4c21e9f-fbd7-4c45-a282-de06ae3b73c5_4fc94d2c-6706-4bf3-8e0c-3c65f2f15b5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Thai restaurant and get notified when they have an open table between 5 to 7 PM\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: thai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.183, 0.102, 0.691, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5111", "image": {"bytes": "", "path": "./images/7f640279-bd9d-45ae-b3fc-43338977e2c1_95f17546-5fa4-44c6-a51c-d57bf20770b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of Museums in New York City.\nPrevious actions:\n[link] CITIES -> CLICK\n[link] NEW YORK CITY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.258, 0.36, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5112", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_5de17e26-948b-45a1-8b27-0a3a8a79b72d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.117, 0.326, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5113", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_4aaa493a-ae2e-4cb4-8081-485b49488432.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK\n[link] Produce -> CLICK\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.252, 0.815, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5114", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_d20e2519-15c9-4c7d-943b-75513d98fbb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[searchbox] To -> TYPE: little caribbean\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.383, 0.359, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5115", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_3d415174-370e-47c0-bb34-e9e442c78a84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.035] in the image\nAnd my action is Action: TYPE\nValue: Katy Perry"}]}, {"id": "mind2web_5116", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_7e1f4374-8e17-49c2-be93-d6bba3d0ec0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.323, 0.316, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5117", "image": {"bytes": "", "path": "./images/5c52af02-ccc7-491a-bea7-05de278bf7da_dc06c01d-2251-4bed-b48d-e4f0c2639a7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest flight from any New York airport to Aruba for 1 person using TrueBlue points which leaves on May 1 and returns on May 5\nPrevious actions:\n[option] New York City area (NYC) -> CLICK\n[textbox] To -> TYPE: Aruba\n[option] Aruba (AUA) -> CLICK\n[span] 1 -> CLICK\n[button] return Friday, May 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.234, 0.853, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5118", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_3132d536-bc3a-4ad1-a326-4db8b75dc6ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.273, 0.691, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5119", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_a848bcb3-b6ff-4c1a-9a10-e66ec68c5196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK\n[textbox] min price $ -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.198, 0.739, 0.209] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_5120", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_442b805f-3033-4997-a3d7-ef6375d4f79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.06, 0.352, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5121", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_7dbbe8fd-aead-477a-92b2-485de3aec565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.001, 0.417, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5122", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_0ab87d28-e48a-4d69-abc3-8606181a83dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.496, 0.331, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5123", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_97e6816f-d2a6-4372-95a4-4801d088446e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.022, 0.194, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5124", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_45e135d4-8e5f-4562-bd2d-e03101466691.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... 
-> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK\n[link] Gorillaz -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.104, 0.832, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5125", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_a0430d87-5a11-4409-8dc3-fff996002ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK\n[link] Game Reviews -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.191, 0.55, 0.217] in the image\nAnd my action is Action: SELECT\nValue: PC"}]}, {"id": "mind2web_5126", "image": {"bytes": "", "path": "./images/126b4604-8b04-4eb1-9027-266631c96f01_3f13fb42-633d-48a5-8d77-afcf9de93569.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book an outdoor table for 3 people at a Los Angeles restaurant for 5:00 PM on April 7th\nPrevious actions:\n[button] Los Angeles -> CLICK\n[link] La Cha Cha Cha -> CLICK\n[div] Today -> CLICK\n[button] April 7, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.199, 0.192, 0.224] in the image\nAnd my action is Action: SELECT\nValue: 3 Guests"}]}, {"id": "mind2web_5127", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b0b1bb1a-52a6-45bd-b8bc-b97ddb5f9e5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK\n[menuitem] Free to home or store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.281, 0.249, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5128", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_2a8e8987-2a95-4d21-a7b6-11eed00c07d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Thu, Apr 20 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.193, 0.923, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5129", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_d8059cb9-a62c-4a11-811c-e185798ece8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Open Jobs -> CLICK\n[span] -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.479, 0.218, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5130", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_e0bfc000-6c7f-49c9-bfea-873bbac85dab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Certified - Refurbished -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Price + Shipping: lowest first -> CLICK\n[button] Features -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.451, 0.868, 0.481] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5131", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_c2cf99e0-83fb-4746-9a9d-7b151d9c60b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[LabelText] Sell My Car -> CLICK\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.224, 0.096, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5132", "image": {"bytes": "", "path": "./images/551ab381-5bfe-4491-9602-0b0c584d1346_51d5a5f6-926f-4a16-98db-bffa5b3c9436.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give me the IMDB recommendations of what to watch.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.008, 0.144, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5133", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_478e1cc4-76bf-46aa-beb1-599a90f1a9b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK\n[button] APPLY -> CLICK\n[span] Compare -> CLICK\n[span] Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.906, 0.59, 0.984, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5134", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_cba371fd-cedd-44b5-bc73-f66ef9af18f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.001, 0.417, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5135", "image": {"bytes": "", "path": "./images/73de3022-2f58-4bd4-8de6-4a201f2bbc98_f0b16e02-0be2-4f9e-9cf5-c08950f7b267.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if there are tickets availabe for the Hamilton musical in Richmond, VA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.226, 0.782, 0.269] in the image\nAnd my action is Action: TYPE\nValue: Hamilton"}]}, {"id": "mind2web_5136", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_86f0129f-5ca2-437f-ace5-ada6f8fda4ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: barclays center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.27, 0.384, 0.307] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_5137", "image": {"bytes": "", "path": "./images/14b5885e-5454-465f-92bf-d8f7315c4a46_dfd559fc-eb25-4cda-9a91-8c60b0bbce36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a ticket from Abbotsford to Sheboygan on March 26, 2023.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Route map -> CLICK\n[textbox] Pick an origin Pick a destination -> TYPE: Abbotsford"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.297, 0.168, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5138", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_c6851a43-51ca-4a93-b937-d560a9c4ce56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Leeds\n[span] Leeds -> CLICK\n[span] Sheffield -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.194, 0.194, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5139", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_8321c8e6-a5b7-45a2-b38e-6d0b5fba0bf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.406, 0.104, 0.713, 0.115] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5140", "image": {"bytes": "", "path": "./images/7b5b2188-afd4-4279-b738-c37a1b5f2142_fcec7df7-3669-4c5f-8162-19849487f0c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a list of available opportunities for a volunteer event for year 2023\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Volunteer -> CLICK\n[link] Become a VIP and Volunteer with Us Today!\u203a -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 0.178, 0.416, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5141", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_e97577ad-f25d-42c2-98a2-74fda1a588c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york\n[textbox] Postcode/ZIP code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.588, 0.95, 0.647] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_5142", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_8e52b340-62a8-48f8-8d18-ce80711db210.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: doha\n[strong] Doha -> CLICK\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.188, 0.942, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5143", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_4b90093f-4363-4c73-8a02-87ab5e4686d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.013, 0.988, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5144", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_10ce4c27-0114-485f-b18a-e5fed2af6d57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.656, 0.625, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5145", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_4208985a-6f68-4493-a1fb-3abbe9503a0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.601, 0.884, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5146", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_dd2bc1cb-54ad-4246-8005-f7287aa435c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.054, 0.869, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5147", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_016bf36a-0f5f-4e4b-a312-232d7232cea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[menuitem] Buy -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.11, 0.205, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5148", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_93b28944-f841-4fa4-a303-7c39d9c73332.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.018] in the image\nAnd my action is Action: TYPE\nValue: game mix"}]}, {"id": "mind2web_5149", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_e62362e7-8e6f-4005-a005-b8bdb6c3d5bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.265, 0.299, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5150", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_85649359-30cc-4848-9453-3f2577813005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.205, 0.222, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5151", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_76bcc527-fafd-4e31-b315-fac7ab42f06f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[link] Award travel -> CLICK\n[span] Flight + hotel -> CLICK\n[label] Basic Economy -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: las vegas\n[span] Las Vegas, NV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.762, 0.622, 0.782] in the image\nAnd my action is Action: TYPE\nValue: san francisco"}]}, {"id": "mind2web_5152", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_2911643c-507a-480b-b496-9cfb4b77d8f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[textbox] Date use format: 01-Apr-23 -> CLICK\n[rect] -> CLICK\n[link] 26 -> CLICK\n[polyline] -> CLICK\n[span] Youth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.196, 0.291, 0.223] in the image\nAnd my action is Action: SELECT\nValue: 16"}]}, {"id": "mind2web_5153", "image": {"bytes": "", "path": "./images/60e1de47-efdb-47e8-935f-2ac38af1ead3_638420e6-97e6-466f-a168-587f17749282.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Prometheus movie to watchlist.\nPrevious actions:\n[textbox] Search IMDb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.007, 0.613, 0.025] in the image\nAnd my action is Action: TYPE\nValue: Prometheus"}]}, {"id": "mind2web_5154", "image": {"bytes": "", "path": "./images/d88f70e8-9b66-4f08-9aa4-e49375b14920_23546e33-8ba3-46f1-b1b7-f8e0acf5ebc2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for places to stay in Santa Fe, New Mexico for a weekend starting on Jun 16th for 3 guests and 2 rooms.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Santa Fe\n[span] Santa Fe, New Mexico, United States -> CLICK\n[div] 16 -> CLICK\n[div] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.268, 0.92, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5155", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_bc008675-e4f9-468b-a15a-02d622cc6f06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\n[link] Smart Home -> CLICK\n[link] Smart Lighting -> CLICK\n[button] Indoor Lighting (7) -> CLICK\n[checkbox] Strip Light (3) -> CLICK\n[button] Sort By: Best Match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.177, 0.991, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5156", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_07e02302-548c-4332-9e6c-188c7e6baade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[textbox] Near -> TYPE: houston\n[span] Houston -> CLICK\n[button] Fast-responding -> CLICK\n[radio] Data recovery -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.118, 0.612, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5157", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_e7444f63-7c77-4462-a723-ab00e729c46d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.213, 0.894, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5158", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_5600ca04-7659-49c0-b34a-7c7de417fea1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\n[button] Savings & Memberships -> CLICK\n[link] Weekly Ad -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.097, 0.714, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5159", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_5a806dc6-8254-4dee-a2cc-c981755d5bb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\n[textbox] search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.08, 0.93, 0.095] in the image\nAnd my action is Action: TYPE\nValue: Dota 2"}]}, {"id": "mind2web_5160", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e3e842b2-02c0-4a19-8fbc-05f8de17a805.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.734, 0.829, 0.744] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_5161", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_e478223e-6ef8-445f-9130-d6f6645f4f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.131, 0.41, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5162", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b442079b-5565-4361-b7f3-666110df8ba4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.408, 0.29, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5163", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_b7d8a0ab-d6fb-4d63-aef6-6a71b72079ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: dog treats"}]}, {"id": "mind2web_5164", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_2d3970c4-2b0a-43cd-bd19-103d5bcb4dd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.228, 0.592, 0.25] in the image\nAnd my action is Action: TYPE\nValue: stewart hotel"}]}, {"id": "mind2web_5165", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_39d3c8ab-ef00-491e-87fa-2c87e399e835.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.34, 0.113, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5166", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_f4d5366b-3609-465c-a8e6-285b40935b03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search -> TYPE: Matthews winery\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.508, 0.464, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5167", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_e60b21c2-3627-44e4-9b13-975c218d9d1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (12) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.503, 0.458, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5168", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_b08451d0-5987-4e39-a51a-6ff6fb83cf22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.215, 0.777, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5169", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_bebe4a89-a653-4ad1-8562-d2d151c0fa90.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[span] Columbus Easton -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.265, 0.847, 0.295] in the image\nAnd my action is Action: TYPE\nValue: 08817"}]}, {"id": "mind2web_5170", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_ae649734-cc80-41d9-a091-a527d4701cf0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.405, 0.922, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5171", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_1387e66f-88ee-4ac8-8cc8-363de89dd7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.079, 0.181, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5172", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_7c115bfc-020b-4e0e-a063-b947e23e0649.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.47, 0.863, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5173", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_aa2386b0-24c1-4193-add3-fb6646cfc330.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK\n[link] Decorative lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.229, 0.41, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5174", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6fea70db-8ef9-4ac9-b662-4ee385b4af59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.447, 0.132, 0.553, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5175", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_8a5133cf-2c25-469b-97f9-4451368b96a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.075, 0.257, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5176", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_cd172bd3-2207-4a53-99df-1609f3cf87a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK\n[listbox] Direction -> SELECT: Forward facing\n[listbox] Position -> SELECT: Window\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.185, 0.925, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5177", "image": {"bytes": "", "path": "./images/747429fa-7dac-4ab0-b604-13cb9bf787fe_74c03d0b-0836-477c-ad20-05abb250cd56.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest unused wireless Logitech Keyboard under $70.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.7, 0.158, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5178", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_88c687c0-458c-43e5-b265-561b2efdf331.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[select] Alabama -> SELECT: New York\n[textbox] Zip Code -> TYPE: 10001\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.268, 0.685, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5179", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_8ca47df3-bfb0-474d-bb4c-705ed1bbf199.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.017, 0.592, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5180", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_d61748b4-b9f8-457e-bc90-a8a516c8e12a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[link] Homes -> CLICK\n[searchbox] Please type your destination -> TYPE: MANILA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.087, 0.405, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5181", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_09bfdb44-76c7-465e-ba08-dab8c6dc2e1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\n[a] City Pages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.571, 0.176, 0.712, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5182", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_8f549f9d-9187-4482-bc70-2a244ffd8c8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.283, 0.153, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5183", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_205dfea0-1032-44c3-8c8d-3b2e3c7d1daf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... -> TYPE: king of tokyo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.034, 0.485, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5184", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_f8c7c15b-6d3a-4e6c-b1af-f569d552ca2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[div] -> CLICK\n[div] -> CLICK\n[span] 36 -> CLICK\n[button] 34 -> CLICK\n[button] Confirm Seats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.295, 0.926, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5185", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_e28c2d70-46de-45d6-b3ed-20ea099217a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\n[button] Deals -> HOVER\n[button] Deals -> CLICK\n[link] US Deals -> CLICK\n[button] Save Now -> CLICK\n[button] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.061, 0.749, 0.088] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5186", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_89f2b1aa-4e07-4d42-858c-a415206f1d5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.545, 0.619, 0.572] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_5187", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_b5539183-9fb0-420f-a745-564979f75b5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.075, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5188", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_049acc7d-e917-45d0-99ed-6c273ee77075.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.24, 0.973, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5189", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_01cf52dc-6cba-4c51-a75d-5f2a7730b67c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[span] -> CLICK\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.899, 0.172, 0.969, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5190", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_39c6496a-5532-4d83-8d37-d129f42d5ea2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.197, 0.202, 0.397, 0.247] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_5191", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_a360bd43-6df1-447e-a026-95d0a8b1cac9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.452, 0.718, 0.474] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5192", "image": {"bytes": "", "path": "./images/577ac962-aefc-46c5-9050-a6069bf2a8fb_6c38c5ba-28c2-45c8-bc24-96c1388d7b6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find spare parts with part number 105307 and add 2 of them to cart.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Customer Service -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.475, 0.887, 0.491] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5193", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_b28fa573-26af-4855-b623-b308ad84fd80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.149, 0.159, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5194", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_acffa63c-d008-44bd-9e78-eb0247cd85cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.443, 0.12, 0.49] in the image\nAnd my action is Action: SELECT\nValue: 2005"}]}, {"id": "mind2web_5195", "image": {"bytes": "", "path": "./images/14d4edf5-e5ca-40e8-8e5d-77068a22c32c_52743c90-fd78-45d7-bfbe-99eb4ee84c1b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the scores of the premiership on 19th of March.\nPrevious actions:\n[span] Scores -> CLICK\n[heading] SOCCER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.065, 0.267, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5196", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_4fc61431-330e-4811-a880-f1d5e77c1642.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.169, 0.559, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5197", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_07da8bf9-a101-42cb-865c-597a7150f981.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.267, 0.256, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5198", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_71adc0ae-641d-4097-929f-2698381759d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\n[link] Coupons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.01, 0.789, 0.042] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_5199", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_94d5fd23-1ec9-4921-8aeb-0f577f753252.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[input] -> TYPE: James Smith\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23\n[textbox] Please cancel my order for the following products ... -> TYPE: Harry Potter Box Set"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.549, 0.759, 0.577] in the image\nAnd my action is Action: TYPE\nValue: Not available at address"}]}, {"id": "mind2web_5200", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_5c130bc2-2d39-4454-9e2b-13392f1fbe4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[gridcell] Fri Jun 30 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Sightseeing Tours -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.471, 0.105, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5201", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_b9cf6d20-8607-44e1-841c-7a2fac6505e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK\n[textbox] booking reference -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.353, 0.419, 0.647, 0.466] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_5202", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_b7b26215-0fc1-4125-824d-34fab74c6e32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.037, 0.181, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5203", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_119a3ffb-4740-4938-8d4e-ebe3d288562a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.542, 0.011, 0.592, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5204", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_2faa9698-198d-4c20-a35f-8e569196c53e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[div] Pudong Intl Airport (PVG), China -> CLICK\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.136, 0.253, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5205", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_60718d13-f9e1-432f-9901-b1bdea2cc50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.092, 0.753, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5206", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_09199a3b-66e4-43b0-9207-2b16c63f458d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.133, 0.668, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5207", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_8b05e2c4-bf97-4b1b-a37f-8c1cf88209f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.218, 0.926, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5208", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_05fdd74b-f274-4e42-8108-8a4d93a95506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK\n[checkbox] Pool -> CLICK\n[checkbox] Towels -> CLICK\n[button] Property style -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.223, 0.089, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5209", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_496a9af2-4f3f-48b8-aaa4-2c0e5f30309a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.245, 0.894, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5210", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_18773f07-6800-4427-8cc0-f846379f6f4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Golden State Warriors\n[link] Golden State Warriors NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.137, 0.122, 0.182, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5211", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_4d976fd8-c4fb-49c4-bc13-c59b2122b543.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\n[button] Menu -> CLICK\n[button] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.226, 0.32, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5212", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_8031f639-c35b-4b15-b569-2d863a8cd52f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.202, 0.259, 0.229] in the image\nAnd my action is Action: TYPE\nValue: MUMBAI"}]}, {"id": "mind2web_5213", "image": {"bytes": "", "path": "./images/d7631fa2-ce46-4e20-a043-71ce70627c46_8177f07d-7a0e-40d0-8cd1-7185952cceb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse hip hop concerts that are happening this weekend.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.112, 0.038, 0.205, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5214", "image": {"bytes": "", "path": "./images/10de6ac5-a69e-4d09-87fd-5737ca6f4b99_51296200-a767-42ec-86ee-fe3bc80412f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check ticket listing on floor B row 17 seat listing to Beyonc\u00e9 music concert on Mon Aug 14 at 7:00pm.\nPrevious actions:\n[link] Music -> HOVER\n[a] Trending -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.066, 0.517, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5215", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_7305cebb-6397-41a8-a67f-cec246c6c821.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.545, 0.203, 0.582, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5216", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_6c045a29-7c8f-408d-bf6d-e75aafa65bac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] To Destination Airport or City Your Destination -> CLICK\n[textbox] Destination City or Airport -> TYPE: New York City\n[link] NYC New York City Area Airports, NY -> CLICK\n[combobox] Trip Type:, changes will reload the page -> CLICK\n[option] One Way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.474, 0.07, 0.652, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5217", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_7d0cf261-5cc6-41cf-8142-21fbdd4ffda8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.276, 0.2, 0.321] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5218", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b959a299-bca3-4ce1-a18c-1605c44fc90a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.198, 0.931, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5219", "image": {"bytes": "", "path": "./images/4b2030ff-b83c-445f-bf87-9c8fbc68498b_8a5b0737-d36f-4476-bfdc-64c4c76d5551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for wall art with a price range of $25 to $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: wall art\n[input] -> CLICK\n[link] Wall Art -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.39, 0.127, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5220", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_0564ac9b-e9d9-4084-a3f6-7688481a04d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM\n[button] 3/25/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK\n[button] Open Travel Preferences modal. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.204, 0.609, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5221", "image": {"bytes": "", "path": "./images/521aa0aa-e099-4fc9-bb4e-f8a593a29f81_ea4f8853-82be-4579-8d16-ba3a72401d3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the user reviews for the game \"Cyberpunk 2077\"\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.06, 0.047, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5222", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_b0fe0bea-76e5-4c18-9b01-925d5f4d247e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.206, 0.168, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5223", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_b57c2e90-05e1-41db-9d48-e8af6eb99120.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\n[link] Company -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.342, 0.161, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5224", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_c72364e6-89d8-4b78-8d47-2636bcd591d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] Adults-Only -> CLICK\n[button] Romantic -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.331, 0.491, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5225", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_ca69e7b5-562b-4bf2-9457-9254ea31cee2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[listitem] X5 (87) X5 (87) -> CLICK\n[path] -> CLICK\n[switch] COMPARE -> CLICK\n[button] Add to Compare -> CLICK\n[button] Add to Compare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.196, 0.899, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5226", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_346e50e8-6e74-4963-907f-f63753b97234.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.062, 0.421, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5227", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_43243a2c-3d76-40fc-8e5e-7fc1f48328e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.279, 0.359, 0.309] in the image\nAnd my action is Action: TYPE\nValue: 52nd street, brooklyn"}]}, {"id": "mind2web_5228", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_d3fae0d1-475d-4570-b0bf-7288bf69fc36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.218, 0.078, 0.81, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5229", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_7c2b9362-222c-4103-b794-de349596c06d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.159, 0.595, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5230", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_5b95df26-ff7a-49e4-b1ea-3abec2316d97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Color -> CLICK\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.496, 0.906, 0.531] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5231", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_d5348938-9876-40b0-81d5-01436fa10e76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Electricians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: WESTMINSTER"}]}, {"id": "mind2web_5232", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_5dc7ac8f-f789-4d98-9805-733815243c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.08, 0.787, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5233", "image": {"bytes": "", "path": "./images/dc1f0824-5483-4d3b-87d2-3f760a42a25e_afa03dc3-151d-4448-8936-79d4ce60b351.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the events at any six flags park in Texas\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Fiesta Texas -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.046, 0.714, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5234", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_7ca8088d-aaf6-4a6a-a81f-854a7fddcd12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: SHELDON\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: COOPER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.139, 0.839, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5235", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_a3a0d384-cfcc-439e-a071-d50217cb46b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK\n[option] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.197, 0.16, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5236", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_fe0d8eda-4222-4b4f-989d-c947c8219867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\n[heading] Check-in -> CLICK\n[textbox] Confirmation or ticket number* -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.218, 0.481, 0.243] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_5237", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_57dfa0ac-76e8-4753-948d-3d86bac41a80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.51, 0.225, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5238", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_d143425a-b021-4736-b687-76deed6509ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.445, 0.284, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5239", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_fa48db1f-c5f0-48de-863a-93d7dbd7f15a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[link] YXL -> CLICK\n[div] Size -> CLICK\n[div] Sports -> CLICK\n[div] Fit -> CLICK\n[link] Fitted -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.483, 0.233, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5240", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_7db38950-78ce-4a65-a62e-a5df13e62ff5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.391, 0.282, 0.443, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5241", "image": {"bytes": "", "path": "./images/693ae151-6a70-41b1-b016-87279c4c532e_80c51282-2c04-482f-a0f3-1dbbc5d4574b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the cheapest xbox 360 game available for purchase\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: xbox 360 games\n[searchbox] Search games, consoles & more -> ENTER\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.32, 0.261, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5242", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_f60eedf6-96f7-429a-bb55-803977b8efe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.306, 0.463, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5243", "image": {"bytes": "", "path": "./images/b5c98548-4a3f-4e7c-8287-c36963930348_b74a4253-bfcd-4616-9d96-4219baf3cce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a rental car to pick up at Paris Charles de Gaulle Airport CDG on april 13 at 10 am and drop off on april 18 at the same time\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.039, 0.263, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5244", "image": {"bytes": "", "path": "./images/bdc1f69a-da73-4e51-9dd8-83494d4983c2_0add092e-4553-40ee-8190-317048d85eb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find check-in flight for John Smith having ticket no. 123456780\nPrevious actions:\n[link] Check In -> CLICK\n[textbox] first name maxlimit is 30 -> TYPE: John"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.388, 0.26, 0.562, 0.294] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_5245", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_a1e3d261-9e4d-4aa2-b851-4df6032e1794.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[button] 10, Monday, April 2023. Available. Select as check... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.092, 0.359, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5246", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_032dd419-18b5-4870-988f-085b2fa6d74a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.959, 0.124, 0.984, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5247", "image": {"bytes": "", "path": "./images/978376c1-8545-4160-81d5-722bdea60434_025ff1b1-db7d-4df4-ad4b-77f3a2b2ee2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Start the process to sign up for CVS ExtraCare\nPrevious actions:\n[button] Prescriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.387, 0.397, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5248", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_9d29ae83-8236-4a48-96a6-61cc6b26aab2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50\n[button] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.76, 0.228, 0.807, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5249", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_760ccf46-fb45-43e6-adde-f9f3799c52bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[button] All Location Details -> CLICK\n[button] Selected Pick-Up Date 03/23/2023 -> CLICK\n[button] 03/27/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.554, 0.351, 0.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5250", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_5fe867ec-cc98-4ef2-85ce-691bb9dadb48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[link] Ferry -> CLICK\n[span] East Boston Ferry -> CLICK\n[link] Schedule & Maps -> CLICK\n[button] Connections \uf107 -> CLICK\n[button] Fares \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.648, 0.416, 0.868, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5251", "image": {"bytes": "", "path": "./images/479bdc82-ec52-447b-a577-fa2bcdc3886f_ffb9e96b-9a62-4d47-b786-609d07e1a214.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show offers on home essentials under $20 and add the first 3 items to favorites.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Home Essentials Under $20 -> CLICK\n[button] Save to favorites, KUDDARNA, Chair pad, outdoor -> CLICK\n[button] Save to favorites, GULLBERGS\u00d6, Cushion cover, in/o... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.76, 0.609, 0.784] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5252", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_466cf611-17b2-457e-97cc-7dc9d643ef86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.123, 0.441, 0.136] in the image\nAnd my action is Action: TYPE\nValue: Elevated Escape"}]}, {"id": "mind2web_5253", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_acc19e2a-1a0f-4208-a5a2-8c63425767d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Shape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.492, 0.966, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5254", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_6bf9852e-ea2c-456d-8c70-2fc0f68b13dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.363, 0.845, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5255", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_fa213183-95d3-41e5-a2ed-9593cb0934c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK\n[link] Seattle, WA -> CLICK\n[button] Find -> CLICK\n[span] Coupons -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.116, 0.23, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5256", "image": {"bytes": "", "path": "./images/ccee2694-0d3e-4d81-8d52-5cdf82a9b81f_33c5ade5-919c-4875-be8a-5061b1ed3947.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the nearest apple store to zip code 60540 and check its opening time tomorrow.\nPrevious actions:\n[link] Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.771, 0.112, 0.867, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5257", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_c744062d-fb8a-4354-b660-adb22d70dc2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.328, 0.671, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5258", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_4b083d95-5ecd-4c5e-b53c-f9940aa9134d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.243, 0.205, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5259", "image": {"bytes": "", "path": "./images/cfb351f8-804b-4074-9826-0b3525f68727_57afa34e-90f7-4742-b214-dbeae90b3f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the 2022 highest goal scorer in the MLS league\nPrevious actions:\n[button] Soccer -> HOVER\n[link] Leagues & Cups -> CLICK\n[link] Stats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.198, 0.091, 0.213] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_5260", "image": {"bytes": "", "path": "./images/e6643cfb-567e-4e11-8cab-f85483573539_82ac4858-e319-4d30-b3e5-f4a4c395f697.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow one of the team leaders of one of the NHL teams from the Atlantic Division\nPrevious actions:\n[link] ESPN NHL Home Page -> HOVER\n[link] Teams -> CLICK\n[heading] Boston Bruins -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.716, 0.903, 0.723] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5261", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_af658e24-111b-4b86-bc49-099bb5c8baec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[link] 1,342 2016 -> CLICK\n[link] 98 \u00a315 - \u00a320 -> CLICK\n[link] Condition -> CLICK\n[link] Phil Collins - Both Sides (CD, Album, RE + CD + Dl... 
-> CLICK\n[link] Add\u00a0to\u00a0Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.39, 0.485, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5262", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_091dfb3c-d0d9-49c2-a922-18e6468bc29a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.353, 0.284, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5263", "image": {"bytes": "", "path": "./images/63e3020c-bf52-4950-9e26-50f98a59b5e3_408335c6-cd6f-4e47-a018-845ce17a180d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fast responding highest-rated electrician, who is currently open for replacement of some light fixtures in Westminster, and request a quote.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.032, 0.243, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5264", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_8af6839d-a6e9-4945-ba6a-3cacefda382a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... -> CLICK\n[img] -> CLICK\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.197, 0.711, 0.227] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_5265", "image": {"bytes": "", "path": "./images/4b99412b-6be2-4274-8843-4fc97f0c8247_c33f9822-9241-43c5-99b9-d2f95f871ed9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the bus with service number 10000001\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: 10000001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.212, 0.715, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5266", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_23bd27eb-bad8-45f9-88df-ba23d36bc19d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.203, 0.215, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5267", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c75eb377-9d6e-4a59-866a-86b6912f4e6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Features -> CLICK\n[listitem] Sunroof(s) (2) Sunroof(s) (2) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Black (1) Black (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.175, 0.249, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5268", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_f93c512a-0e48-4c6d-9271-edbd7a0af295.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.052, 0.287, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5269", "image": {"bytes": "", "path": "./images/7b7079f0-8891-426a-8e53-0818a02a0159_39c6a073-6af9-4a14-9be0-2a6d782ae73d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me things to do nearby in Mar 30, 202\n3\nPrevious actions:\n[textbox] Where to? -> CLICK\n[button] Nearby -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.123, 0.705, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5270", "image": {"bytes": "", "path": "./images/26d3a803-b0aa-4a2c-bd31-3fe97a63388c_2ff008e7-d6e6-46bb-893d-375d5dd41af9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vacation packages to Hawaii.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.171, 0.509, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5271", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_3208832e-8eae-44a6-afde-a00344187ea6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\n[textbox] From -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.268, 0.385, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5272", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_bfb2a8b3-ca1e-4ce4-8be1-65c9a7ddad63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK\n[radio] GRADE_A -> CLICK\n[button] Add To Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.086, 0.793, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5273", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_86fafa00-08fd-4d15-9ca1-88658c080ec1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK\n[span] Best match -> CLICK\n[option] Price: lowest first -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.405, 0.227, 0.466, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5274", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_98f238b5-18bb-4181-816a-6d9b5a5d3b55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] Stays -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.208, 0.841, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5275", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_492f1698-0267-4a06-b636-cc4f0480d04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.35, 0.196, 0.373] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_5276", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_20ad31c2-db7f-461e-ae21-1fc7eb68b0bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.306, 0.478, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5277", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_d4189d13-7ce7-4aeb-80da-c3fe1b0ac5a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK\n[link] Rock 2,420 -> CLICK\n[link] Tab -> CLICK\n[div] Today's most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.19, 0.97, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5278", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_eb02273c-148b-4fdf-9b98-90ddaec0236a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Things To Do -> CLICK\n[div] Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.008, 0.781, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5279", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_8f862cf0-ac81-4f52-84aa-550ecc2e259c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.004, 0.686, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5280", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_04457367-8505-4973-b9af-ecd5eb814182.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] Close -> CLICK\n[button] Instant Quote -> CLICK\n[textbox] What city would you like to depart from? -> TYPE: Chicago\n[checkbox] I'm interested in the reverse trip of the selected... -> CLICK\n[checkbox] Deluxe Bedroom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.192, 0.447, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5281", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_c30f4d49-9308-4bb4-95e9-925b39ffde9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: Ohio\n[button] Ohio United States -> CLICK\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.169, 0.981, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5282", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_f6af5393-0ce0-46f0-8b74-4e24be4e2eb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.357, 0.335, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5283", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_ef8fc370-3a59-4d06-bcc6-a8048ad914f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\n[button] Deals -> CLICK\n[link] Partner Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.74, 0.618, 0.759] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5284", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_963d0e12-9794-4fa0-bf40-2c8b8d7a7885.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK\n[gridcell] Choose Saturday, April 22nd 2023. It's available. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.532, 0.464, 0.562] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5285", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_0ebc6a29-19cc-47fc-bd87-454c1635c3a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM\n[button] Select My Car -> CLICK\n[link] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.569, 0.882, 0.608] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5286", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_1abdf652-813a-4b30-8713-7c1777b532cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[link] $50 to $100 -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[link] Amazon Basics 7-Piece Lightweight Microfiber Bed-i... -> CLICK\n[button] Red Buffalo Plaid $57.81 In Stock. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.811, 0.333, 0.97, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5287", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_1c9b36d1-16c5-4ca4-9e82-ce67012536be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harrt Reid Intl Airport, LAS\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.528, 0.666, 0.551] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5288", "image": {"bytes": "", "path": "./images/48ca542f-5346-40b9-b586-9294a2f64519_3d969a7c-5bb4-45b6-9fd1-ba7943641510.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Like a game mix album.\nPrevious actions:\n[searchbox] Search -> TYPE: game mix"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.008, 0.553, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5289", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_d5cf93d2-7fa2-4971-8668-436c866e37c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[link] tours. 
-> CLICK\n[li] Boat Tours -> CLICK\n[div] Show Info -> CLICK\n[link] View Tickets Now -> CLICK\n[button] Check Availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.156, 0.754, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5290", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_d7bfb473-8c73-4808-96bc-187d00be5ad7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.735, 0.846, 0.779] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5291", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_bddaad73-e494-46f5-a14b-25dddc2c137b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.141, 0.587, 0.175] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_5292", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_db1a7212-3913-4f4a-97d9-dea87a43cf1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[combobox] Return Time -> SELECT: 1:00 PM\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $14.45/Day$13.95/Day -> CLICK\n[checkbox] $12.99/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.812, 0.508, 0.93, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5293", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_6f6671ae-98f0-4b14-8c7c-870d0ed1d39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] Postal code, address, store name -> TYPE: 10017"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.149, 0.727, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5294", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_5499a9c1-e7dc-4ed6-a400-bbb551015eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\n[link] BUCKEYE.FOOBAR -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.496, 0.249, 0.552, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5295", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_85b15500-7ef7-44a8-bb3c-d956b2a1361b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. -> CLICK\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.361, 0.516, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5296", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1004b2e9-f35b-41b0-8d61-f0b4bc059024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. 
-> TYPE: diner"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.06, 0.673, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5297", "image": {"bytes": "", "path": "./images/164cf025-e767-4781-930d-5bd46ad2c4c1_9329b2b4-204c-456d-803b-fd5be3bb63a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the Resy Staff Favorites in Seattle?\nPrevious actions:\n[button] Location Atlanta -> CLICK\n[button] Seattle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.026, 0.423, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5298", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_91fc5db3-bc62-423e-ac0f-c4b6fa8cb02f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.079, 0.259, 0.105] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5299", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_c0a0ef81-6f9f-44d3-9189-18f73175c4ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] Where? -> TYPE: mexico\n[div] Mexico -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] May -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.146, 0.266, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5300", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_d8abd1e3-05b6-4a3c-8f94-95268d8eb712.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.634, 0.169, 0.692, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5301", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_cdea18a2-830d-4169-81c4-6750fefe1837.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami\n[strong] Miami -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.228, 0.218, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5302", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_89abfe33-14ce-492d-ab90-2fe0710f6f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.181, 0.45, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5303", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_3e93eb48-8370-4f9e-8adb-36dde059ff13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\n[div] Sports -> HOVER\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.243, 0.297, 0.446, 0.313] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5304", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_91c7893c-d68e-4a03-80d1-ea26d677995e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.161, 0.825, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5305", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_0a2b00df-658b-4670-ae54-556abe0f89dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[link] Kindle Books -> CLICK\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.49, 0.078, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5306", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_c5dd3eae-488c-4ece-a2fd-8bf08531a739.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.06, 0.958, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5307", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_eeecfa15-c5a3-4487-bfc1-6c14e0030ccb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: BRISTOL\n[span] Bristol Airport (By Bus) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.113, 0.327, 0.14] in the image\nAnd my action is Action: TYPE\nValue: LONDON"}]}, {"id": "mind2web_5308", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_08e81213-3c6e-46da-9b79-9286f704685d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.907, 0.037, 0.934, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5309", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_dad6902a-d307-4bf4-822c-922230877a59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[span] Asia -> CLICK\n[div] Kyoto -> CLICK\n[div] Select your dates -> CLICK\n[svg] -> CLICK\n[checkbox] 25 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.108, 0.221, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5310", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_cb395227-5541-4782-8fd4-5262f8c4f95e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.531, 0.421, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5311", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4cfe3658-8ba0-4c53-b71a-15ef5a8820c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND\n[span] Auckland Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.225, 0.891, 0.26] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_5312", "image": {"bytes": "", "path": "./images/bbfed209-df70-434a-aece-5c5fc7a38f4f_d7d7ba00-17c0-4836-a944-04a73a6eeeff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the push notification settings\nPrevious actions:\n[svg] -> CLICK\n[link] Your account settings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.371, 0.198, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5313", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_620c6af5-4ea4-4b24-9a37-6d5b9f511ead.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... 
-> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.835, 0.491, 0.912, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5314", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_0d69ea98-ed44-4420-9611-46a13ab910fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: new york\n[button] Remove -> CLICK\n[span] New York, United States -> CLICK\n[textbox] Flight destination input -> TYPE: london\n[span] All airports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.143, 0.928, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5315", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_0ef8edbc-14a0-42b9-9b98-3551bd624d87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[textbox] When? -> CLICK\n[li] Summer -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Airfare Included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.321, 0.568, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5316", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_a5e3b4dd-7550-4133-ab81-97aff1f3e12d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Spain -> CLICK\n[button] All cities -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.497, 0.12, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5317", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_bef2efa7-2137-4ab1-b2e1-158c78b052cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.011, 0.682, 0.153, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5318", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_8b11a754-1a38-47d8-8712-457499d2b048.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK\n[svg] -> CLICK\n[label] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.408, 0.126, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5319", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_8b324073-038b-40d6-b5b3-7566305fb60e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: NIAGRA FALLS\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.177, 0.812, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5320", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_7fea74c7-272b-4eca-a381-ce0ae5ec874e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[link] See availability -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.301, 0.93, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5321", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_92018224-3ef4-440f-aace-50d82122188c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.012, 0.863, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5322", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_2702fbb5-0714-4760-8194-cf4cbf66de8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.104, 0.419, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5323", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_ddb2dc50-ccd8-4d59-a989-a955e7f43f9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK\n[link] \ue902 CPU -> CLICK\n[button] START A NEW BUILD -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.693, 0.868, 0.719] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5324", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_086ab8cc-7f5e-43b0-9b03-ba376d208f4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[button] 15 -> CLICK\n[combobox] Time -> SELECT: 10:00 AM\n[combobox] Party size -> SELECT: 4 guests\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.095, 0.621, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5325", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_4f50c9e8-dcf6-44d5-b5d4-0cc813b9f8a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK\n[button] Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.222, 0.691, 0.309, 0.714] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5326", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_3ed13d1d-b299-4bc0-87d9-be6a6aa17641.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.394, 0.0, 0.493, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5327", "image": {"bytes": "", "path": "./images/6c0a3b1e-6ce8-4955-9359-dd4378aacc82_14d7a703-5b89-4d70-a04d-48db66be0fc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of HomePod mini\nPrevious actions:\n[link] Accessories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.173, 0.226, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5328", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_09210be8-ee5d-4061-87d4-9f48ede5dafa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK\n[select] All -> SELECT: Ages 3-5 (31)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.303, 0.196, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5329", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_67529ff9-0625-47b7-bf03-7a04f7556fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.32, 0.43, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5330", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_c3ee6477-58ce-4d62-bd31-236bfd3babe9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[link] 20 -> CLICK\n[link] 20 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 PM\n[combobox] Return Time -> SELECT: 6:00 PM\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.17, 0.349, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5331", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_2996fbd4-6cdc-42a2-a6ad-cc26162df9bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[span] Select store -> CLICK\n[textbox] Enter zip code to save preferred delivery location... 
-> TYPE: 60173\n[span] Update ZIP code -> CLICK\n[span] See all bathroom boxes & baskets -> CLICK\n[button] Show filter modal Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.53, 0.546, 0.627] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5332", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_a7d026d8-89be-4bfb-b9f9-98e603c88313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.369, 0.064, 0.582, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5333", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_71442cf6-a544-45d1-8185-2965fe1171b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.052, 0.553, 0.093] in the image\nAnd my action is Action: TYPE\nValue: diamond stud earrings"}]}, {"id": "mind2web_5334", "image": {"bytes": "", "path": "./images/612653f8-defe-41be-ae48-26ed859d98ca_fd02da0a-4aff-4945-b1c6-b3d6c65623b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate Dry Tortugas in the state of Florida and find out the Current Conditions.\nPrevious actions:\n[button] Find a Park by State -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.498, 0.788, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5335", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_efdf1978-a4d6-4b14-8198-ec383c1f8703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK\n[checkbox] Seat choice included -> CLICK\n[checkbox] No cancel fee -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.24, 0.825, 0.266] in the image\nAnd my action is Action: SELECT\nValue: Price (Lowest)"}]}, {"id": "mind2web_5336", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_1cda2d0c-fc85-46e2-9352-deea3a3d9d8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK\n[link] Stairway To Heaven -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.091, 0.852, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5337", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_d20044ad-f983-497b-a184-5cc46fe9b448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.825, 0.468, 0.888] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5338", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_23038b09-7316-4566-ac68-64d95c9eccbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[textbox] From -> TYPE: Los Angeles\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. Miami area -> CLICK\n[button] Explore flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.448, 0.122, 0.552, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5339", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_de2d5ac6-ca8b-4dd1-a72a-f464a3709a05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK\n[a] Software Development -> CLICK\n[a] Hybrid -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.166, 0.692, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5340", "image": {"bytes": "", "path": "./images/1a833106-368a-41e4-a36e-32f6b1d36d16_758f22f5-565b-4381-ae08-76f669e70273.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip with the least amount of walking from Central Park Zoo to the Broadway Theater.\nPrevious actions:\n[searchbox] From -> TYPE: central park zoo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.197, 0.359, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5341", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_88e84f41-446a-49a3-a1ae-fd9d685f93c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\n[searchbox] Search... 
-> TYPE: Star Wars The Mandalorian statue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.061, 0.562, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5342", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_481eba4d-b954-4f4a-9def-fa3045120562.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100\n[textbox] Enter your pick-up location or zip code -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.134, 0.349, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5343", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_79a47f9f-c64f-40d5-a039-2f51a467d145.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK\n[option] Full time -> CLICK\n[button] SEARCH BY JOB TITLE OR KEYWORD -> CLICK\n[link] Accounts Payable Associate -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.331, 0.888, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5344", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_ad2e9a55-fccc-47c3-addf-579d53655742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.224, 0.675, 0.24] in the image\nAnd my action is Action: TYPE\nValue: New"}]}, {"id": "mind2web_5345", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_3f7b29b7-0875-4dc9-8d95-f024555edf4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK\n[combobox] Destination -> TYPE: Tokyo\n[span] (HND) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.333, 0.541, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5346", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_98ae2435-3f48-4ed5-a069-ca2cd6f44cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)\n[select] All -> SELECT: Under US$20\n[button] Refine results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.561, 0.366, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5347", "image": {"bytes": "", "path": "./images/05c4da5b-263d-40a4-9982-6cf6311b57a1_a89f2fe4-6202-487f-b994-ddcd7cdac194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an Xbox Wireless controller rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: xbox wireless controller\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.573, 0.145, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5348", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_6ba27ea0-5559-4c96-b207-7a504a0f96c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK\n[region] 4 Stars & Up -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.099, 0.882, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5349", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_9992efab-d6f6-4d4e-81f3-0ce885f45457.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK\n[textbox] Flight destination input -> CLICK\n[textbox] Flight destination input -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.213, 0.833, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5350", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_8ed71c65-50b2-4399-9ccd-41e9efbd5525.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.038, 0.352, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5351", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_1882a323-7275-4bea-95a4-89908286cee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[button] Cabin -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.679, 0.523, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5352", "image": {"bytes": "", "path": "./images/54112d86-1d85-4abf-9e12-86f526d314c2_e00a7248-aab6-4799-9307-6f4750f0a727.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the BGG rules for Game Submissions?\nPrevious actions:\n[button] Help -> CLICK\n[link] Guide to BGG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.644, 0.28, 0.661] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5353", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_84171eea-4480-415c-a5cd-77899aae8110.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019\n[select] All -> SELECT: UFC\n[link] UFC Fight Night: Edgar vs. The Korean Zombie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.27, 0.222, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5354", "image": {"bytes": "", "path": "./images/41ff100f-582a-422e-b387-3abd9008cee4_c92672cc-f930-4bee-a4cc-abaf9489d0d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open red line subway schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Subway -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.205, 0.339, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5355", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_f063a765-bdb0-49b3-916e-7297e2dd0019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.007, 0.988, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5356", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_e3071752-60a1-41f5-9b69-33df9f273c08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.402, 0.473, 0.441] in the image\nAnd my action is Action: TYPE\nValue: 10000"}]}, {"id": "mind2web_5357", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_254dd796-b699-4c64-9b37-efaf31f2eac2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? 
-> TYPE: SHANGHAI\n[div] Pudong Intl Airport (PVG), China -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.224, 0.636, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5358", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_bb8662a9-6602-4a34-814b-7c8c9177374e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: Ballet\n[button] Search -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.164, 0.871, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5359", "image": {"bytes": "", "path": "./images/29d6b448-a688-4c2f-8f6d-a13546d506d8_e79b9f3c-2ebf-4731-b982-935811aeddf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of jazz albums released in 1890.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.0, 0.465, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5360", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_c1a76ce4-fd5d-4879-8605-31cbeee8f12a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.093, 0.763, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5361", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_be0e790e-1b06-41b4-ae7a-26e06db06d59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] Apr 18 -> CLICK\n[gridcell] 20 -> CLICK\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.87, 0.314, 0.899, 0.349] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5362", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_c98a20ca-42e8-470f-ba7c-a78cbedd0804.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.267, 0.335, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5363", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_b2fae695-2147-4942-8629-9379ac0a96e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn\n[option] Brooklyn, NY, US Select -> CLICK\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.306, 0.471, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5364", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_d7ce815c-bfdb-421e-b2a4-c5ccbb3a1470.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.108, 0.153, 0.438, 0.169] in the image\nAnd my action is Action: TYPE\nValue: Heathrow"}]}, {"id": "mind2web_5365", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_fd9ff4a8-72af-417a-a5ed-69dd689c1143.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[link] \uf054 Next -> CLICK\n[link] \uf054 Next -> CLICK\n[link] 7 -> CLICK\n[textbox] CHECK OUT -> CLICK\n[link] 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.099, 0.771, 0.117] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5366", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_3532f564-c8fa-40c9-ae22-fb22f6068baf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.191, 0.441, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5367", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_5089f916-bda7-4572-a489-5174ee03e1bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.058, 0.463, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5368", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_fc65a09b-bdf4-48e6-899b-e01ec2453e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> TYPE: 15000\n[textbox] Down Payment -> TYPE: 5000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.102, 0.459, 0.127] in the image\nAnd my action is Action: SELECT\nValue: Michigan"}]}, {"id": "mind2web_5369", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_14307382-3b81-4395-88fd-b75a99a93339.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[link] Tickets & Passes -> CLICK\n[span] Buy Now > -> CLICK\n[button] Buy Now -> CLICK\n[button] Add to Cart -> CLICK\n[button] No Thanks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.158, 0.605, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5370", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_aff1da14-373b-4bd3-b9e4-248ae4224872.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK\n[link] VACATIONS & RAIL TOURS Train vacation packages thr... 
-> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.804, 0.156, 0.857, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5371", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d8cb1754-877f-4815-9831-75dfd9de4b51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.422, 0.196, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5372", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_22bbc1e3-9c82-4dcb-a01d-a34c70a62cef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.838, 0.037, 0.869] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5373", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_c3feded9-8223-48bf-becb-6538339f3784.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.113, 0.554, 0.127] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_5374", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_d0bb7e5d-b098-470f-926b-27415618e851.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.173, 0.459, 0.264, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5375", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_95f9528e-68a1-4ce4-9f96-4a9888e5eefd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as helpful\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Paris\n[b] Paris -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.299, 0.777, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5376", "image": {"bytes": "", "path": "./images/bd2b5866-dced-4ec1-8b3e-8a8a24e71a96_1a541cb9-2269-426c-8687-241e040beb84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all English Reading and Writing skills Educational Books for Ages 6-8 currently in stock that are available in hardback under 20$.\nPrevious actions:\n[link] Shop by category\u2228 -> CLICK\n[link] Shop by category\u2228 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.314, 0.195, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5377", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_c19e9a86-378c-4f24-b5bf-8c5c78cfc272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK\n[combobox] 2 tickets for Thursday, May 4th | American Express... -> SELECT: 3 Tickets\n[button] Book Now a ticket for Centurion\u00ae Member Access to ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.324, 0.523, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5378", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_d4b41cf2-20f4-4ed5-bcd0-ae109880502e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.296, 0.488, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5379", "image": {"bytes": "", "path": "./images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_8450177b-97fb-4355-8b95-ac90354952fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up the scores for the previous day's NBA games\nPrevious actions:\n[link] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.119, 0.312, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5380", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_a031109e-9ed4-4b76-8497-83fe74913b87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.088, 0.999, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5381", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_698847eb-4f57-4615-90b7-cde1436b7612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[switch] COMPARE -> CLICK\n[path] -> CLICK\n[button] Add to Compare -> CLICK\n[button] Go button to Compare the Selected Car's -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.386, 0.583, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5382", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_dde2fc42-7cd0-4124-8cdb-3f51b425bf9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Santa Fe\n[button] Santa Fe, NM, US (SAF) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... 
-> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.415, 0.571, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5383", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_9d81fbb9-c6c8-4473-bb74-f9725bc210ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: adele"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.189, 0.483, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5384", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_a093b329-7c9f-44b6-ae32-b2ee3a114cc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK\n[textbox] To -> TYPE: Boston\n[option] Boston area -> CLICK\n[button] Today, Tue Apr 11 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 0.2, 0.798, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5385", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_a320d96f-1ba9-4eac-978f-0716a62c6f42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: 94115"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.015, 0.335, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5386", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_b036523c-2ab5-412e-b346-8ca1741f8efb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.141, 0.693, 0.163] in the image\nAnd my action is Action: TYPE\nValue: 04/23/2023"}]}, {"id": "mind2web_5387", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_1e9e9fcf-9e4e-4520-9442-9f6cdef14eac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.645, 0.401, 0.704, 0.429] in the image\nAnd my action is Action: SELECT\nValue: OH"}]}, {"id": "mind2web_5388", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_0551b291-b54f-4dc7-9c5e-60e28d345655.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.112, 0.35, 0.131] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles, CA"}]}, {"id": "mind2web_5389", "image": {"bytes": "", "path": "./images/9f76268f-9a4e-4eab-ab2c-262d2fe7bdee_e0021e91-9a16-4aee-8a31-b7efe4147c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse crossbows on sale with limited stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.079, 0.104, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5390", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b82baee9-9e68-4f5e-bf8d-287b72418176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK\n[link] NPGallery -> CLICK\n[span] Search all Parks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.239, 0.864, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5391", "image": {"bytes": "", "path": "./images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_6222129b-f9e3-4f1d-b425-baeb45366cb9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get me info about planning a wedding cruise\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.094, 0.679, 0.216, 0.698] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5392", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_7bf6f0e5-aacb-414b-aa02-8cdd5e2677c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.132, 0.264, 0.266, 0.292] in the image\nAnd my action is Action: SELECT\nValue: Toyota"}]}, {"id": "mind2web_5393", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_0031514c-8d35-43c7-ab3f-8723ef5b8647.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[link] Careers -> CLICK\n[link] here -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.118, 0.092, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5394", "image": {"bytes": "", "path": "./images/4ee87dc8-2fa1-4c98-828c-9c0c8dd8225f_683aa55c-8275-4665-901c-4148a4b9ba73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Edit my movie watchlist.\nPrevious actions:\n[button] Watchlist5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.522, 0.232, 0.559, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5395", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_fcf2f0d4-5415-4a02-9ed9-5cd383824fe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.307, 0.13, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5396", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_f6c8620e-b239-4c5f-a904-a73fca89bc97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[img] -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.205, 0.739, 0.216] in the image\nAnd my action is Action: TYPE\nValue: 500"}]}, {"id": "mind2web_5397", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_c82b0897-8695-4678-9894-9e6dc3f0dec4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.529, 0.373, 0.553] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5398", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_b413aea4-57db-4202-ae50-8c0adbc9e2d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.207, 0.652, 0.26] in the image\nAnd my action is Action: TYPE\nValue: changi"}]}, {"id": "mind2web_5399", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_432a59c6-c207-4996-8339-e180f43164bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.309, 0.341, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5400", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_cfd7af8e-2b20-4e37-9c53-bc573db84b80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.004, 0.204, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5401", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_e4bfc892-619e-42af-a6d8-c208fbf54abc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[menuitem] View All -> CLICK\n[label] Ice Fishing -> CLICK\n[svg] -> CLICK\n[label] Medium Light -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.135, 0.051, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5402", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_31fa2e7e-646b-4f4b-aaca-fed108191241.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[textbox] Origin -> TYPE: Ohio\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.338, 0.639, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5403", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_14b29326-0525-482c-a3f7-ac9b37978045.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.199, 0.41, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5404", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_d1fd77e6-3905-49e3-8aee-58aa0a2df50d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\n[combobox] Search for anything -> TYPE: laptop\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.694, 0.027, 0.701] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5405", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_50f46316-29db-42fb-9b52-320a814c5355.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse TV Shows by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.786, 0.182, 0.797] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5406", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_62cf5905-dba2-4936-abd9-9b6e872672d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.626, 0.404, 0.654] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5407", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_0ee2e627-345c-4b15-8542-3c13034733b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK\n[div] Thu, Apr 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.358, 0.647, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5408", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_83929b11-5af8-4c6d-ad37-5ca0f73ad849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.163, 0.888, 0.192] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_5409", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_0200f0fe-002c-4088-8037-f34bfff4156c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.364, 0.795, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5410", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_ba9f81d9-3f78-4f0c-95c4-5795d13a3183.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.133, 0.831, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5411", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_8fb7f444-5f4c-47e0-998b-193424bfc319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.101, 0.301, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5412", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_21aada65-2d3c-4713-abae-5b5693c2de68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy\n[textbox] Phone Number -> TYPE: 123-999-0000\n[textbox] Email Address -> TYPE: RA@gmail.com\n[textbox] Zip Code -> TYPE: 90001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.794, 0.401, 0.859] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5413", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_f69045c0-0476-4c5d-9f6e-c84d5488fb80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.115, 0.652, 0.487, 0.669] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5414", "image": {"bytes": "", "path": "./images/8ab30c9e-f768-46e8-8174-83651d361b9e_36bdd9ba-ddea-4b12-81a7-7d1e8fb3a665.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the scores of today's NBA games.\nPrevious actions:\n[span] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.048, 0.611, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5415", "image": {"bytes": "", "path": "./images/bb02400d-e9da-416a-839a-0068f511a630_a23b9fbd-1e7a-41fa-9116-32747b5b9649.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get WWE tickets for April 10 event happening in seattle, and book 4 cheapest lower level tickets.\nPrevious actions:\n[link] WWE Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.361, 0.941, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5416", "image": {"bytes": "", "path": "./images/4097c577-e637-4543-87a3-09b2f4734163_2c488a7d-0773-4c99-9420-d0e8103c6d3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pedicure salon in New York and add two to favorites.\nPrevious actions:\n[input] -> TYPE: New York\n[link] New York, NY -> CLICK\n[button] Find -> CLICK\n[link] Beauty & Youth Village Spa -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.007, 0.867, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5417", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_18ed9e91-64f4-4929-8827-5d7634c0101a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.677, 0.154, 0.686] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5418", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_2e2727b7-9ba0-46a3-8338-849b1a5ed4fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.618, 0.24, 0.631] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5419", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_a957bb96-e539-4222-b03f-b8c371629b9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. 
-> CLICK\n[textbox] From , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.292, 0.492, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5420", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_a148f761-c294-45ac-a94f-292cbf472e4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK\n[link] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.184, 0.265, 0.2] in the image\nAnd my action is Action: SELECT\nValue: 23"}]}, {"id": "mind2web_5421", "image": {"bytes": "", "path": "./images/9223ed29-5abb-4f4d-8108-1c3a584a7017_71c0293f-f272-4abb-96b5-f08d24560f51.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about reduced bus fares.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.01, 0.441, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5422", "image": {"bytes": "", "path": "./images/49372757-4c58-4cb5-bdb4-eed0c3e83199_0d1d347a-d1b1-4f96-884b-502fa81b3184.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an electrician that serves the 94115 zip code and is considered Fast Responding\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.021, 0.564, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5423", "image": {"bytes": "", "path": "./images/0cbdfafd-822f-4f61-bb57-05fc146752ce_a3a1bddc-b996-4166-829c-41ba7edc29a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check car toll rate for bridges and tunnels\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.062, 0.058] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5424", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_da47c157-9f8f-414e-a839-ea1a2dfb5244.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.102, 0.13, 0.126] in the image\nAnd my action is Action: SELECT\nValue: 2010"}]}, {"id": "mind2web_5425", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_528e9700-0759-4a2e-a6b2-b5eceaa76ec4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.35, 0.084, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5426", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_fc5ded8c-80f4-42d5-b087-79319e6d4d09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[button] All Filters -> CLICK\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.327, 0.757, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5427", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_b81f2774-e594-49a0-a9fc-07df56177c9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Beverly Hills -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] 03/31/2023 -> CLICK\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.39, 0.387, 0.429, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5428", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_a79f5fc4-c635-43b6-8229-911c45a5874c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.165, 0.524, 0.184] in the image\nAnd my action is Action: TYPE\nValue: South Station"}]}, {"id": "mind2web_5429", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_23f0080b-9e6b-46fe-8c36-a1ea6f957e0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\n[button] View / Modify / Cancel Reservation -> CLICK\n[textbox] Confirmation Number (required) -> TYPE: 123456\n[textbox] First Name (required) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.521, 0.488, 0.565] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_5430", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_121e1a49-f7dc-441e-a922-e668447ccb12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.629, 0.302, 0.648, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5431", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_804c3901-8299-4c08-b8b9-3ec0bee96528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.185, 0.463, 0.192] in the image\nAnd my action is Action: TYPE\nValue: Montana"}]}, {"id": "mind2web_5432", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_d5c808ea-5cdc-4d6e-b820-2b1a6406910b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\n[button] Community -> CLICK\n[link] Record Stores -> CLICK\n[link] Explore the directory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.139, 0.287, 0.169] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_5433", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_e6655c90-5529-4167-9f8b-b3f458a83f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[heading] Weddings -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.422, 0.648, 0.449] in the image\nAnd my action is Action: TYPE\nValue: 100"}]}, {"id": "mind2web_5434", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_cebfac0b-996a-4c18-b6e8-08e9f22c8751.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK\n[label] Brown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.322, 0.223, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5435", "image": {"bytes": "", "path": "./images/b307117b-e10c-470f-a85d-968b2e442b19_9800f6fc-8573-4f1d-bbf8-e425e6dc4fdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a coffee shop with wi-fi.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.091, 0.012, 0.232, 0.019] in the image\nAnd my action is Action: TYPE\nValue: coffee shop"}]}, {"id": "mind2web_5436", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_0a054763-8af3-4199-864e-2582834bd49d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.187, 0.156, 0.198] in the image\nAnd my action is Action: TYPE\nValue: Jerry Trainor"}]}, {"id": "mind2web_5437", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_c8d4e4d8-3926-494b-b1ae-2f1317e4cfd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK\n[div] Most Popular (this week) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.278, 0.436, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5438", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_b770f788-e3a6-45d2-96df-e3a62380ac46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.49, 0.486, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5439", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_dffd4c05-f61c-46df-8ab9-a2c7da5b03af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.261, 0.314, 0.326, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5440", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_1c0713cb-8c16-4984-a9d8-a39278a27255.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.269, 0.327, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5441", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_36fb74bd-494a-45a8-9dd0-de77fd479449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[b] Columbus -> TYPE: NEW YORK\n[span] -> CLICK\n[svg] -> CLICK\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.233, 0.103, 0.298, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5442", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_43dc260b-617f-4482-b487-26b210b2a179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] Make -> CLICK\n[listitem] Chevrolet (101) Chevrolet (101) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Price -> CLICK\n[button] $10,000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.015, 0.187, 0.125, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5443", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_438596ce-4f48-4b91-987f-08aae356b4ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.301, 0.191, 0.338] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_5444", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_3b8ad033-513a-4bff-9546-bda8e4d9c844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.449, 0.83, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5445", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_67bf019d-0b9f-412d-abd8-1b30480269c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.547, 0.39, 0.844, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5446", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_40384767-ea26-43fd-af97-41d9f4f1070c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK\n[textbox] Please enter City, State, or Zip Code -> TYPE: SPRING, TX\n[div] Spring, TX, US -> CLICK\n[link] Click to submit search form -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.37, 0.147, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5447", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_892aaf63-9463-482a-95b8-2a7e145c429f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.474, 0.277, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5448", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_55d5d079-4d18-450a-8fe7-9eee0ea3d7cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK\n[button] Menu -> CLICK\n[tab] Dinner Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.289, 0.452, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5449", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_e96f5968-a7e3-4e14-90a4-c528877899fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK\n[textbox] Passenger last name , required. 
-> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.149, 0.365, 0.172] in the image\nAnd my action is Action: TYPE\nValue: X899987799"}]}, {"id": "mind2web_5450", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_3785a3c5-0358-4871-bc29-918d4d0b6fc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK\n[link] 05:30AM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.09, 0.959, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5451", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_84e6c79d-94e9-4e04-b994-04ebf807383a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.032, 0.402, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5452", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_1429f24c-c8ac-40fb-aaea-d2c48942177c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\n[button] Search -> CLICK\n[searchbox] Leagues, teams, players, shows, personalities... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.04, 0.405, 0.053] in the image\nAnd my action is Action: TYPE\nValue: Lebron James"}]}, {"id": "mind2web_5453", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_23187c57-820d-46f2-9022-ea9050f9f41d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.291, 0.089, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5454", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_58a721bb-f7cf-4303-b880-5014865024b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.078, 0.83, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5455", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_d49c529a-3bf3-4aa5-a54f-0de7ededcd0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.216, 0.75, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5456", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_9f7bc34f-467d-4f7f-bbea-3cf3193b675d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK\n[searchbox] From -> TYPE: Bay Shore\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.212, 0.474, 0.233] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5457", "image": {"bytes": "", "path": "./images/b4362dec-6803-415a-a112-819f694b84d9_795d2e6f-3df8-4c64-a81e-fd2558515b2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase a one meal season dining pass for the Six flags great america\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Great America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.123, 0.843, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5458", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_83616b73-0718-430d-979e-39e05350f0a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.788, 0.69, 0.906, 0.72] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5459", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_578f1744-b335-4030-b230-ae524d9563ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.284, 0.664, 0.314] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5460", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_13f39fc7-d314-4c0a-afec-4a96349324c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.466, 0.263, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5461", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_b7887659-969f-4d57-b28d-4c563523c87a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.263, 0.352, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5462", "image": {"bytes": "", "path": "./images/7b05f537-af7a-4fd7-972b-123ce5a34294_d59b9cb4-58a1-43ef-885c-cbb45c1d1897.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check status of flight 6944 on April 6\nPrevious actions:\n[link] Flight status -> CLICK\n[span] Flight number -> CLICK\n[spinbutton] Flight number , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.283, 0.365, 0.297] in the image\nAnd my action is Action: TYPE\nValue: 6944"}]}, {"id": "mind2web_5463", "image": {"bytes": "", "path": "./images/cb07d410-75ff-483a-920c-3ce2a295524f_7ba6a708-e79c-4eb7-a274-40acd111584b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the kicker with the most made field goals in the 2022-2023 NFL season.\nPrevious actions:\n[link] Stats -> CLICK\n[link] Field Goals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.252, 0.124, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5464", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_9c268783-bff3-4bc7-8657-d596565595c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.032, 0.164, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5465", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_ac6c5b85-a57e-437d-88c5-ba2902646ae3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[span] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: SHANGHAI\n[div] Shanghai, China -> CLICK\n[textbox] Where to? -> TYPE: SEOUL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.303, 0.656, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5466", "image": {"bytes": "", "path": "./images/10593972-6b1d-4b07-ba61-cb56ca0f06e3_ad5f42ba-311a-4ef7-9762-870220d71672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Lenovo laptop under $800 and create a price alert for $400.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.011, 0.39, 0.034] in the image\nAnd my action is Action: TYPE\nValue: lenovo laptop"}]}, {"id": "mind2web_5467", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_6699b58e-e6c0-46f8-8547-370f9a9e6248.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK\n[span] Easter Baking -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.178, 0.459, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5468", "image": {"bytes": "", "path": "./images/851ed4e6-51ee-47ad-a861-a28bdc61a102_077c4e0c-a79b-448b-8bfe-0973673ac73a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the page to schedule a Model X test drive.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.819, 0.054, 0.869, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5469", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_a9f12745-aa3a-4825-b3f5-06459fe37511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[combobox] Reservation type -> SELECT: Delivery\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.271, 0.636, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5470", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_0a182a4e-608c-4def-8a52-ea6734b8cc01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.189, 0.008, 0.613, 0.027] in the image\nAnd my action is Action: TYPE\nValue: Star Wars"}]}, {"id": "mind2web_5471", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_5ebe5f2c-a94a-441c-9a90-094aa2e89e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\n[link] Massively Multiplayer -> CLICK\n[generic] VR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.392, 0.536, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5472", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_7cbb3878-36cd-48df-96b4-d28fad34a7c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn\n[button] Brooklyn New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.429, 0.202, 0.571, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5473", "image": {"bytes": "", "path": "./images/8cb94647-b0a8-4b8c-b00a-4788ef522b79_0ddfeb2e-f66f-45bf-83fa-9e1e9b969560.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find trains from New York to Washington DC leaving on april 18 for 2 adults with disability\nPrevious actions:\n[textbox] Depart Date -> CLICK\n[gridcell] Tuesday, April 18, 2023 -> CLICK\n[img] Add travelers and discounts -> CLICK\n[button] + -> CLICK\n[button] Apply Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.178, 0.254, 0.192, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5474", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_d7bb0019-1d5f-4911-ad86-8eb40fe86004.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.23, 0.891, 0.264] in the image\nAnd my action is Action: SELECT\nValue: 3 00 PM"}]}, {"id": "mind2web_5475", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_58ba99b5-6faa-44dd-b30f-0e2896aa3265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK\n[combobox] Search for anything -> TYPE: rare books\n[button] Search -> CLICK\n[link] Auction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.085, 0.792, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5476", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_83bc3946-6fe5-400e-b95a-ba5c990b552c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Group Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.445, 0.481, 0.465, 0.508] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5477", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_9e72492a-35b1-496e-9260-942c1aaf9854.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.191, 0.895, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5478", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_397474e3-703a-42e4-b314-d556bd57c30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[link] Shop -> CLICK\n[link] SHOP ALL CARS -> CLICK\n[div] Best match -> CLICK\n[label] Lowest mileage -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.171, 0.249, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5479", "image": {"bytes": "", "path": "./images/e592a68f-567f-4d6a-a1d6-4166cd129638_d70dfa91-1ad9-4da1-acd8-cc6f2f92b944.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a truck for 10001 zip code on Sunday 11:30 am with flexible dates.\nPrevious actions:\n[button] Reservations -> CLICK\n[link] Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.172, 0.352, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5480", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_d345b944-ded9-4a4b-b8c0-e1eaf822f340.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[select] All -> SELECT: Ages 9-11 (13,217)\n[select] All -> SELECT: Hindi (59)\n[button] Refine results -> CLICK\n[link] Notify me -> CLICK\n[link] Add to wishlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.115, 0.716, 0.148] in the image\nAnd my action is Action: TYPE\nValue: Must buy"}]}, {"id": "mind2web_5481", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_843eff2d-9962-4a8b-9e30-2c0b32f05d88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK\n[button] Selected Pick-Up Date 03/21/2023 -> CLICK\n[button] Selected Return Date 03/22/2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.377, 0.376, 0.406] in the image\nAnd my action is Action: SELECT\nValue: 11 00 AM"}]}, {"id": "mind2web_5482", "image": {"bytes": "", "path": "./images/47072aee-1cb7-436c-8dc6-b2a6d109a100_4f8ec5f7-cb1b-4f13-9702-36f547598319.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse black strollers within $100 to $250.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.023, 0.553, 0.042] in the image\nAnd my action is Action: TYPE\nValue: black stroller"}]}, {"id": "mind2web_5483", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_c87db43b-1abd-4fd3-83a9-fe2edd7bfa4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK\n[option] MEN -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.192, 0.461, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5484", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_b10d981a-f02e-4c54-b95c-df1ac0369cdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] Furniture 356 -> CLICK\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.945, 0.056, 0.977, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5485", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_c8d056c9-ef8e-4e07-9631-352f60776776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK\n[link] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.368, 0.29, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5486", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_cff4214f-6139-4db1-9095-650cfe5bbd79.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.394, 0.568, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5487", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_b900e955-2f87-44cd-aff7-61b5ec066da7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.326, 0.48, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5488", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_898acdb2-6360-4662-92d3-040ee591da52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.0, 0.557, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5489", "image": {"bytes": "", "path": "./images/15a0ffe5-8462-4a8e-8938-91b05a40756f_c8c86f76-7509-46a1-bb49-dc9b4b8a0664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a new address to the account. The address is 2983 Marietta Street, APT 2. Business name is Buck.\nPrevious actions:\n[span] 171 2nd street -> CLICK\n[textbox] Add a new address -> TYPE: 2983 Marietta Street"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.301, 0.442, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5490", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_654b35b4-a888-4328-a473-69f63632a8e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Age -> CLICK\n[button] Add Less than 1 -> CLICK\n[button] Add 1 -> CLICK\n[button] Add 2 -> CLICK\n[button] Find Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.214, 0.601, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5491", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_5a234296-fb23-449c-877a-cbc770096ab7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\n[textbox] Where to? -> TYPE: London\n[button] London England -> CLICK\n[circle] -> CLICK\n[link] Unique Experiences -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.371, 0.107, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5492", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_e0fd3f28-3f04-455d-8bde-a480f0ec1b0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. 
-> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.181, 0.158, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5493", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_e92228fd-d6ce-45f2-9dfd-42fc9c17c776.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00\n[combobox] AM or PM -> SELECT: PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.169, 0.353, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5494", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_70b1e9a6-c1b1-42d3-8b25-a284ee385e10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000\n[input] -> TYPE: 5000\n[input] -> TYPE: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.363, 0.265, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5495", "image": {"bytes": "", "path": "./images/059327ab-e26e-4af6-9063-f58c50ecd2d2_b567c00f-f405-4acf-999c-13b2ccdc84f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the schedule and maps for the orange line\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.24, 0.397, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5496", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_7ace8414-2b72-436a-84f4-f81ce2d5ecc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[link] Sports Bras -> CLICK\n[div] Size -> CLICK\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.643, 0.102, 0.655] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5497", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_ca9851d3-60f2-424e-9945-db5862f53d2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK\n[textbox] Order number EXAMPLES: ECEA12345, 01234567 -> TYPE: 456481897"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.241, 0.908, 0.27] in the image\nAnd my action is Action: TYPE\nValue: 898-448-6474"}]}, {"id": "mind2web_5498", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_4e3a9490-e833-4e4c-957f-e0556fb8e96c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[link] Group Travel for Students -> CLICK\n[textbox] Destination -> TYPE: washington\n[menuitem] Washington D.C., DC, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.164, 0.478, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5499", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_20a9bb0e-d885-42ee-bfb2-0a0ab6c13706.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[div] Goa -> CLICK\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.444, 0.263, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5500", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_a2f2bb0d-a51e-4c53-9468-5b4fa030a112.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.394, 0.219, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5501", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_dc3c5e75-a90a-4e6f-877f-dd1b5c40e9c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.28, 0.394, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5502", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_223fed6c-ab5a-40ca-8fd7-4ca5fdc52d9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER\n[span] Dry Cleaning -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.015, 0.564, 0.025] in the image\nAnd my action is Action: TYPE\nValue: new york city"}]}, {"id": "mind2web_5503", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_38ec759c-f290-4d3f-8336-d7f5d20f5580.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK\n[link] External Solid State Drives -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.899, 0.092, 0.929, 0.099] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5504", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_c859da83-a276-460b-ba2b-d37555a94449.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.073, 0.523, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5505", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_991f70b5-0160-4220-bfc2-f69b70d2b1f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\n[link] NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.068, 0.411, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5506", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_6bb0ea02-190d-46c8-98fb-ebfe04f58ecd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.35, 0.194, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5507", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_13288e86-8e09-4608-93f3-ed250f087a42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[label] Distance -> CLICK\n[svg] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.272, 0.916, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5508", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_21c94120-79c7-4305-af25-b347848f9b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK\n[combobox] To -> TYPE: new orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.251, 0.561, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5509", "image": {"bytes": "", "path": "./images/03103341-f9d3-40a8-8435-f7946dd707b3_81263190-8c66-422d-ba92-ece80af4d80b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the budget price for the vehicle with most seats availible at Roanoke Regional Airport from April 5-6.\nPrevious actions:\n[span] Roanoke Regional Airport -> CLICK\n[button] Select My Car -> CLICK\n[link] Price (Low to High) -> CLICK\n[div] Sort by: -> CLICK\n[link] Number of Seats (High to Low) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.307, 0.918, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5510", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_88cb6de4-642d-4878-916b-7ab443d2af7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.037, 0.728, 0.069] in the image\nAnd my action is Action: TYPE\nValue: phoenix suns"}]}, {"id": "mind2web_5511", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_ecec671d-d6ef-4da5-ac94-b680f5e904dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK\n[link] Harry Potter and the Cursed Child - Parts I & II -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.285, 0.973, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5512", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_9cce8c69-8195-4b45-822d-283e082837b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.046, 0.152, 0.278, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5513", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_feb0706b-a5b2-4b57-b2f4-a4574d9af828.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.029, 0.652, 0.052] in the image\nAnd my action is Action: TYPE\nValue: mens timberland boots"}]}, {"id": "mind2web_5514", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_b12e1589-6f9a-4f0c-9123-81a42039d8c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.022, 0.804, 0.054] in the image\nAnd my action is Action: TYPE\nValue: benadryl"}]}, {"id": "mind2web_5515", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_6d1f7b66-139a-4ec9-ad57-c5a574f6988f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[searchbox] To -> TYPE: stoney brook\n[span] Stony Brook -> CLICK\n[select] 1 -> SELECT: 10\n[select] 00 -> SELECT: 00\n[link] Find Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.346, 0.866, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5516", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_8a76a674-3387-477b-95a5-02919a9dd32d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.174, 0.734, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5517", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_4038a1d1-b391-48f7-9093-45bec729f442.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK\n[listbox] hour -> SELECT: 08"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.177, 0.194, 0.193] in the image\nAnd my action is Action: SELECT\nValue: 00"}]}, {"id": "mind2web_5518", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_82978229-c1f9-4bb2-a23f-900adb290f39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK\n[gridcell] April 10, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.256, 0.831, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5519", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_348c21cb-4bc5-454a-b3b3-3955c93b08ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.643, 0.309, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5520", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_5200e3d5-946c-41fe-b34d-015858be3dec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.176, 0.701, 0.211] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_5521", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_d5c1426a-876f-4bb5-945e-be4c53a4afc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[option] Adele -> CLICK\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK\n[label] 2 -> CLICK\n[button] $3,535/ea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.182, 0.213, 0.479, 0.244] in the image\nAnd my action is Action: TYPE\nValue: adelefan@hotmail.com"}]}, {"id": "mind2web_5522", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_1048aad4-2ed3-4bb9-8c0d-234cdb6b90ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.029, 0.376, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5523", "image": {"bytes": "", "path": "./images/3d83f3de-58f2-4aba-9b02-4a230d35a4ef_2ec3122c-4656-4a11-b38a-ace3f4ecb082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated adventure game available for early access.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Early Access -> CLICK\n[div] Narrow By -> CLICK\n[link] Adventure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.365, 0.371, 0.432, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5524", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_9c998cb2-c740-4cc3-8c99-58824e200687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[ins] -> CLICK\n[ins] -> CLICK\n[ins] -> CLICK\n[link] Show all 10 cars -> CLICK\n[link] Opel Insignia\u00a0or Similar , View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.283, 0.599, 0.298, 0.611] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5525", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_a0335e1b-4305-40b9-9379-c6ecb06799ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.178, 0.3, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5526", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_25c59d1d-3e28-490b-a832-aa15ee2497d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK\n[link] Groups \ue92e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.821, 0.312, 0.851] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5527", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_b88c28d6-7d12-42fc-95b3-f4267b4fd200.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.194, 0.214, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5528", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_ff268edf-5481-45b7-87dd-16072ddacf02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[span] Pet Type -> CLICK\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.252, 0.974, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5529", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_6fe96e04-6f9b-4de6-960e-14f70df89eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.495, 0.32, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5530", "image": {"bytes": "", "path": "./images/5d69e640-7765-4381-979c-9881afc048dd_85a2842f-8d86-4d33-b7ae-a9a5af111f9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the monthly payment of a 15k car with a down payment of 5k in Michigan for a fair-rated person for a term length of 48 months, then shop with this budget for the cheapest car and add it to the favorites.\nPrevious actions:\n[textbox] Down Payment -> TYPE: 5000\n[combobox] State -> SELECT: Michigan\n[combobox] Credit Score -> SELECT: Fair (580-669 FICO\u00ae Score)\n[combobox] Term Length -> SELECT: 48 months\n[link] SHOP WITH BUDGET -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.137, 0.288, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5531", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_dc7e4e12-7d61-48a8-a1ec-2c52646d5975.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.148, 0.629, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5532", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_aa22d0e0-82c3-4792-afb9-94cc4366b61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 25000\n[input] -> TYPE: 5000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.109, 0.473, 0.132] in the image\nAnd my action is Action: TYPE\nValue: 12"}]}, {"id": "mind2web_5533", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_5a1b9d08-5eb0-4ae0-8fa1-7183a2c8a7c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.134, 0.719, 0.161] in the image\nAnd my action is Action: TYPE\nValue: Coldplay"}]}, {"id": "mind2web_5534", "image": {"bytes": "", "path": "./images/a52774d2-550d-475a-922c-25c37d44557c_1ad12bdc-c43e-4bd9-acfe-a5fdd5d8e2ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see mansions for 3 adults to stay in in europe during any month\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[tab] Flexible dates -> CLICK\n[label] Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.076, 0.737, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5535", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_3eb249cb-72cb-4fdb-be0b-adee49627c52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK\n[checkbox] SUVs -> CLICK\n[checkbox] 2+ -> CLICK\n[button] Select Nissan Kicks Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.437, 0.93, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5536", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_0e088632-253f-4d11-af58-c48d7b276f16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.361, 0.281, 0.369] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5537", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_34ecbbb5-7d8b-4f1a-b63c-b940c801e7e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit\n[searchbox] Search AA.com\u00ae -> ENTER\n[heading] Find your trip - Find a reservation - American Air... -> CLICK\n[textbox] Last name -> TYPE: sin"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.377, 0.196, 0.618, 0.24] in the image\nAnd my action is Action: TYPE\nValue: 1234567"}]}, {"id": "mind2web_5538", "image": {"bytes": "", "path": "./images/0dc0190c-57cc-4c0f-b939-c0617102166e_080b7f6e-4cf4-4bcd-b8b7-5de3e9fb5337.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to favorites a cruise from San Francisco to the Panama Canal departing Oct 2024\nPrevious actions:\n[span] Sail To -> CLICK\n[button] Panama Canal -> CLICK\n[span] Sail From -> CLICK\n[button] San Francisco, CA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.314, 0.591, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5539", "image": {"bytes": "", "path": "./images/39b037ac-0a11-4b05-8919-b4f9863fd0cd_885432a5-3a1f-40a1-9c83-3e2222987162.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show review of Prometheus movie.\nPrevious actions:\n[path] -> CLICK\n[tab] MOVIES -> CLICK\n[searchbox] Search -> TYPE: Prometheus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.156, 0.677, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5540", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_ff237f12-b8da-44a6-a94b-44c986bde324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.232, 0.5, 0.264] in the image\nAnd my action is Action: TYPE\nValue: nevada city"}]}, {"id": "mind2web_5541", "image": {"bytes": "", "path": "./images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_10bba13a-2813-4375-9199-70db338f88c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guide for paying with commuter benefits.\nPrevious actions:\n[p] About -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.04, 0.627, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5542", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_eaa6d53f-db1e-4d44-a4bf-95031cb2e235.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... 
-> CLICK\n[a] Processors - Desktops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.719, 0.376, 0.745] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5543", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_a2787ae6-4fa3-444f-a098-41c7fe2c1de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK\n[checkbox] Hot tub -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.335, 0.089, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5544", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_95063aac-bdf3-44d0-9ac9-ff9893c88672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.154, 0.561, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5545", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_943d00d4-11cc-4ffc-8bad-584d783ae37a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> TYPE: new orleans"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.233, 0.573, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5546", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_2c773e90-ebe7-4e70-ad41-8b5e3ea66c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.138, 0.223, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5547", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_596faa46-ddf1-4243-8f12-aae808036582.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[link] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.177, 0.365, 0.192] in the image\nAnd my action is Action: TYPE\nValue: san antonio"}]}, {"id": "mind2web_5548", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_d4a307c1-04e4-46fa-8d17-2bd949c405ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.07, 0.777, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5549", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_9a6d2ff8-b4aa-4bfe-8962-01e45d156bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.962, 0.273, 0.974, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5550", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_bbe4a682-8575-43f2-919e-1e66ee9af9ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[span] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.162, 0.63, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5551", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_a0ae2016-64e5-4317-b950-1a708db07062.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Year -> CLICK\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.364, 0.238, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5552", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_f4f9eaa4-d365-4194-ac5c-02412f49c7e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK\n[button] Confirm -> CLICK\n[button] Show flights\ue99a -> CLICK\n[heading] QAR\u00a04,980 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.358, 0.712, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5553", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_1faa68ab-b59e-4b69-85ad-8f3f1c42d617.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.196, 0.473, 0.222] in the image\nAnd my action is Action: SELECT\nValue: Corolla"}]}, {"id": "mind2web_5554", "image": {"bytes": "", "path": "./images/4baa4918-3312-4b9b-9506-333057d6856b_0d2a5b19-2a2b-4397-b66e-ddb186bdebde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for coupons and deals in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.168, 0.961, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5555", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_00248976-2b6a-47d6-a025-29c82ff112f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.041, 0.377, 0.071] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5556", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_b209f671-5caa-487b-a43c-666ba609b584.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.259, 0.128, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5557", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_542b1db1-6a74-4c6c-bd1c-ff43a1309b99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.01, 0.469, 0.015] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5558", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_c1fa9374-0367-4285-a5f2-3f7cfa4f5379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK\n[button] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.307, 0.036, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5559", "image": {"bytes": "", "path": "./images/5f9182dc-d35d-4c0e-9abe-cd913c136528_8450e2c6-f8aa-40bc-876e-21cf29a8cb77.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find career openings in the marketing department\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.35, 0.706, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5560", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_777270ff-78c9-4c91-81f2-776107cbcd41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[button] Apr 26, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Price: low to high\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.157, 0.422, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5561", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_9f57308e-e4bb-4c1c-8514-5c0d179fa5db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food\n[link] organic dog food -> CLICK\n[link] Tender & True Organic Small Breed Chicken and Live... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.748, 0.225, 0.988, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5562", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_86b669c9-2974-40ed-8fc8-6c3f5b709c7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: bali"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.244, 0.795, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5563", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_2146a4a1-d5b8-42c5-94b9-547d31930e44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.347, 0.609, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5564", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_dbe89b77-d51e-4e13-8e3f-970b5ce60eca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK\n[textbox] Zip code -> TYPE: 90026"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.694, 0.108, 0.702] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5565", "image": {"bytes": "", "path": "./images/16886ec7-3301-4103-b175-9fa817335984_96b11da3-438a-468c-a35a-da87b5a3b3ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the highest average points scored in the current season\nPrevious actions:\n[button] NBA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.116, 0.312, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5566", "image": {"bytes": "", "path": "./images/1c8b3d98-ffcf-42b9-ae0b-73d22d87bad7_00d49134-71de-43ed-9c37-19452b46685e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pair of diamond stud earrings to the cart.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: diamond stud earrings"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.074, 0.342, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5567", "image": {"bytes": "", "path": "./images/839ad551-da04-4c8e-96c3-093e762ea167_83ce3ed0-9d5e-4e13-885c-6cd6b0291439.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and book the cheapest daytime only monthly parking option in Seattle near Stripe, 5th Avenue\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.069, 0.785, 0.086] in the image\nAnd my action is Action: TYPE\nValue: stripe, 5th avenue"}]}, {"id": "mind2web_5568", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_41b08949-112b-453b-83cf-1426058407d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.111, 0.645, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5569", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_230a1bfe-cf97-4bde-8268-f70287809032.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[combobox] Return Time -> SELECT: 11:00 AM\n[textbox] Return to same location -> TYPE: washington\n[div] Washington Dulles Intl Airport -> CLICK\n[generic] Vehicle Type * -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.285, 0.782, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5570", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_0be743dd-2860-4ed8-81aa-211cb3c67518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.231, 0.754, 0.255] in the image\nAnd my action is Action: TYPE\nValue: allan.smith@gmail.com"}]}, {"id": "mind2web_5571", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_a3d23bf6-5aa6-4245-8bb3-0c4d05470750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] Close -> CLICK\n[searchbox] Search Site -> TYPE: 8GB Ram\n[button] \uf002 -> CLICK\n[img] 8GB (1x8GB) DDR3L 1600 (PC3L-12800) Desktop Memory... 
-> CLICK\n[button] ADD TO CART \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.235, 0.852, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5572", "image": {"bytes": "", "path": "./images/1f28fed3-bed9-444a-bf2b-3700b516b97f_d5d286c4-6f8e-4b7a-8fff-e50d13cf9ada.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking garage near Thalia Hall in Chicago that offers pre purchasing.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Thalia Hall\n[span] South Allport Street, Chicago, IL, USA -> CLICK\n[strong] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.257, 0.379, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5573", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_2ca10cfe-4ab1-488f-b16d-305182e3c99f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK\n[checkbox] PINK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.89, 0.246, 0.918, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5574", "image": {"bytes": "", "path": "./images/d6dd19a2-c57e-4e4d-b7e1-8919a86f1157_f4a04bcc-0ce6-4a2c-a076-b96a65d0a7e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest likely to sell out activity in Los Angeles on April 2\nPrevious actions:\n[gridcell] Sun Apr 02 2023 -> CLICK\n[circle] -> CLICK\n[link] Likely To Sell Out -> CLICK\n[svg] -> CLICK\n[span] Price (Low to High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.249, 0.785, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5575", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_75341aa4-49d6-43ad-86f6-b82d2b6c95fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[path] -> CLICK\n[span] Delivery -> CLICK\n[button] Change location -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.119, 0.719, 0.163] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_5576", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_f26ebe1e-3767-41cc-9263-447a47ea8ce3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK\n[link] Tickets & Passes \uf078 -> CLICK\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.435, 0.04, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5577", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_4ac20b82-db8a-4e3f-94c8-d76357986448.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.331, 0.151, 0.365] in the image\nAnd my action is Action: SELECT\nValue: 2022"}]}, {"id": "mind2web_5578", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_33dc45c6-f292-4b68-8df3-95a76a20a619.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.05, 0.369, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5579", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cbf7f977-c76b-4ae7-bfec-2ff4b8c4f362.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Transmission -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.317, 0.249, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5580", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_c3c60d0c-7d49-46c8-ba77-3b9a28a14d52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.07, 0.58, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5581", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_7a9bc022-4dfa-4b45-bcf3-35db5c5902c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.06, 0.266, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5582", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_7577fa72-6d3d-468b-9c75-a4dd8f2d35bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[textbox] Type a date, or use enter to open, escape to close... -> CLICK\n[gridcell] Saturday, March 25, 2023 -> CLICK\n[button] Find Schedules -> CLICK\n[img] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.689, 0.397, 0.727] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5583", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_8fbb902f-04fa-4bd7-a4e9-d0ba0f793c6a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.326, 0.271, 0.339] in the image\nAnd my action is Action: TYPE\nValue: WASHINGTON"}]}, {"id": "mind2web_5584", "image": {"bytes": "", "path": "./images/f8428085-905f-4190-9404-3e28fb691252_2326f36e-ad6b-4850-a5cd-83eb7df45721.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the newest on-demand releases.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.022, 0.614, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5585", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_d9f6a27e-20e4-4711-939c-c1d832462aa2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station\n[option] South Station, 700 Atlantic Ave, Boston, MA 02110,... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.277, 0.344, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5586", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_36517be2-c47a-4c23-8d97-fefc258aa5b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights multiple cities -> CLICK\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.288, 0.29, 0.305] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_5587", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_2fbd874e-8a6d-4382-8e29-670c173354bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[span] Frozen Foods -> CLICK\n[span] Frozen Pizza -> CLICK\n[button] Type -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.269, 0.634, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5588", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_30499164-5c21-4aa3-861e-81c8b848a22d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK\n[div] \u00a3 -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.695, 0.916, 0.77] in the image\nAnd my action is Action: TYPE\nValue: Happy Birthday Love"}]}, {"id": "mind2web_5589", "image": {"bytes": "", "path": "./images/b20d38a9-8c0f-4ed5-becc-f0f54fb90798_91811be9-5687-41af-9800-b2ad1470e844.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about the NBA Finals schedule.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.059, 0.958, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5590", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_30c13bcc-6f9d-4265-8c57-1073030ce44f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.27, 0.144, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5591", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_f96e7319-7712-4074-9b25-48a0c4769033.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.644, 0.045, 0.656] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5592", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_f5195da6-57eb-4def-a279-ec2069340b01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 222900\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.269, 0.102, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5593", "image": {"bytes": "", "path": "./images/1a807a1c-d3b2-425e-9684-2a9e79846676_77ccb021-cc33-47a9-9637-3cf72d44d1af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for restaurant that offers African Cuisine in east village under $30\nPrevious actions:\n[link] restaurants. -> CLICK\n[li] Cuisine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.391, 0.164, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5594", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_070d9bc0-242b-4d83-ae47-c1a17c0040e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.254, 0.571, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5595", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_aa72534c-0de6-4c4c-8d2e-378dd9bb25ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: red sox vs yankees"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.149, 0.871, 0.177] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5596", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_5e338d2e-d804-4b2b-8f81-7311a89ccce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[span] for 12 -> CLICK\n[div] Every iPad can connect to Wi\u2011Fi, so you can stay c... -> CLICK\n[radio] Add engraving It won\u2019t impact returns or trade-in ... 
-> CLICK\n[textbox] First line -> TYPE: Hello World\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.707, 0.938, 0.76] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5597", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_214bef59-0758-44eb-886f-b6745b668e05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\n[searchbox] Search... -> TYPE: Star Wars The Mandalorian statue\n[img] The Mandalorian ArtFX+ 1/10th Scale Statue - Manda... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.269, 0.969, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5598", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_efdcb0e6-e118-4995-bf76-1dc64e6f6e0e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] Good -> CLICK\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.505, 0.76, 0.698, 0.786] in the image\nAnd my action is Action: TYPE\nValue: doew"}]}, {"id": "mind2web_5599", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_d32919a6-4663-42fb-a081-9cbd842bc551.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.262, 0.084, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5600", "image": {"bytes": "", "path": "./images/78e346d2-3c80-4243-b921-ed35c4c4d923_3eec37dd-f749-468a-9e10-8cd36f12e224.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track an order using the order number of 456481897 and phone number of 8984486417.\nPrevious actions:\n[link] Order Tracker -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.215, 0.459, 0.244] in the image\nAnd my action is Action: TYPE\nValue: 456481897"}]}, {"id": "mind2web_5601", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_89d0a43a-593a-46f4-92e3-d1b1615293e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Video Games -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Nintendo Switch -> CLICK\n[checkbox] Everyone Everyone -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.239, 0.375, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5602", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_e2bedd9a-7758-4d86-b0d2-517ee761c274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] -> CLICK\n[span] Add to list -> CLICK\n[checkbox] Walgreens New -> CLICK\n[button] Done -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.25, 0.969, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5603", "image": {"bytes": "", "path": "./images/aa1a4414-f141-42b8-9e8b-28d5bd1eeef2_21f8bfdc-d53f-40a3-8c4c-ee39f0f4a7d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Venice Beach that allows mobile coupons.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Venice Beach\n[em] Venice -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.128, 0.336, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5604", "image": {"bytes": "", "path": "./images/50bd08bd-989f-44ba-8a99-71e75ba602f1_e69a6088-4873-41f8-be56-5ee72a989dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cheapest monthly price for parking near Shubert Theatre.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Shubert Theatre\n[option] Shubert Theatre\u00a0\u00a0225 West 44th Street, New York De... 
-> CLICK\n[link] MONTHLY -> CLICK\n[span] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.226, 0.14, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5605", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_128f72a9-5531-4202-a730-5c09d7d9aaa0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.189, 0.341, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5606", "image": {"bytes": "", "path": "./images/b5cb859d-8460-4b27-b6c9-d6fea6de9889_15a35d9f-c3aa-44be-8f0f-4827042e2f95.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find kids 13Y size t shirts and add 3 to cart.\nPrevious actions:\n[link] KIDS -> CLICK\n[link] T-Shirts -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.23, 0.471, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5607", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_f2510dca-2b8a-4d16-9824-8bd6f3e5274f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.164, 0.488, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5608", "image": {"bytes": "", "path": "./images/0f9dd411-f3ab-494e-8a3a-5dc9a908f4ee_185d50aa-4bbf-4107-8913-200ee426102d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the cvs pharmacy nearest to zip code 45201 that provides hair loss evaluation and treatment.\nPrevious actions:\n[link] Schedule a women's health exam -> CLICK\n[link] Explore all MinuteClinic health services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.274, 0.908, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5609", "image": {"bytes": "", "path": "./images/74f01011-9bcd-433b-8405-975ca5c3f356_c7548fe6-29eb-4ffb-a431-24ad7f535f5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the address and store hours for the Armageddon Shop record store in Boston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.557, 0.0, 0.645, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5610", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_665f4508-101d-406e-b5f6-ebfe574eb34d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[checkbox] 29 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 6:00 PM\n[combobox] Drop off time -> SELECT: 11:00 PM\n[div] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.211, 0.682, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5611", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_df533272-7f21-43f3-a50f-89b97eb99bc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[svg] -> CLICK\n[button] Miami -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.31, 0.186, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5612", "image": {"bytes": "", "path": "./images/d9c160e7-b179-4d42-8570-e6f3f85aa412_2918942d-cf82-4992-ac54-7ce758ca697f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated adults-only romantic beach vacation deals and book the cheapest one.\nPrevious actions:\n[link] Vacations -> CLICK\n[link] Beach Vacation Deals 95 DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.307, 0.396, 0.332] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5613", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_3a673d2c-870c-483f-8337-b1359c2cd031.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM\n[generic] Vehicle Type * -> CLICK\n[p] Compact -> CLICK\n[button] Select My Car -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.204, 0.882, 0.222] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5614", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_dc38e438-dc4a-4fea-8621-383fb449ebf2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[span] -> CLICK\n[div] Leather -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.129, 0.286, 0.149] in the image\nAnd my action is Action: SELECT\nValue: Good To Go"}]}, {"id": "mind2web_5615", "image": {"bytes": "", "path": "./images/980d35af-6c00-4d6c-94cd-0c41e0fabb76_4559b512-72e0-43a0-93f8-38b7f1688a06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse mens Timberland boots in size 9 and under $100 and filter by lowest price.\nPrevious actions:\n[combobox] Search for anything -> TYPE: mens timberland boots\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.165, 0.312, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5616", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_4256fd38-43d4-49a8-a0da-618b5264ed20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK\n[combobox] Select Make -> SELECT: Audi\n[combobox] Select Model -> SELECT: A6"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.222, 0.545, 0.253] in the image\nAnd my action is Action: SELECT\nValue: 2020"}]}, {"id": "mind2web_5617", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_27988d8a-0da4-41ff-bb40-f20d4a1a7749.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.327, 0.114, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5618", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_8782d364-4e18-44ff-9aee-4e1c21c11ed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Bottoms -> CLICK\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.387, 0.471, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5619", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_e6cb09cb-edbd-44c6-a911-d51f39af7dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] set store -> CLICK\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Series -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.006, 0.192, 0.045, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5620", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_6dec575d-ef6c-419c-98f4-c906218623e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.187, 0.34, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5621", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_67dad40a-d63a-4cbf-9271-85500b8de12d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.871, 0.043, 0.98, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5622", "image": {"bytes": "", "path": "./images/644b7bed-b5fb-4090-b04f-a669d41ac93a_aa168c45-dd50-42df-acda-564969d01e5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule of the East Boston ferry, check connections, and fares and download the pdf schedule.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Ferry -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.193, 0.339, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5623", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_be2b8420-57de-4edb-8ee8-1316eabea49a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[link] 25, Saturday March 2023 -> CLICK\n[radio] Arrive by -> CLICK\n[select] 1 -> SELECT: 9\n[select] 00 -> SELECT: 45\n[select] AM -> SELECT: AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.406, 0.348, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5624", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_b651a6f3-40e8-4541-bb42-45c812a7017b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.13, 0.365, 0.146] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_5625", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_8dc97709-9a15-4255-b63f-010da99ade05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.112, 0.587, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5626", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_7ad70888-fa59-43d7-8787-aa207662d59a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK\n[button] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.625, 0.517, 0.64] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5627", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_34c6c7d3-aea8-4755-978d-ba476644df1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.736, 0.279, 0.757] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5628", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_5bebb9b8-5943-41f5-b871-76d06302dbfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.113, 0.336, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5629", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_d25f3ac6-5b8b-4c1d-a4f1-905223ab9ea1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: surge protector\n[button] surge protector -> CLICK\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.462, 0.067, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5630", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_31cb54e5-737b-4c62-8e2f-b2b8b74d551d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[heading] $139 Vermont: Cozy Kimpton in the heart of Manches... -> CLICK\n[button] CHECK DATES -> CLICK\n[link] $189 -> CLICK\n[link] $259 -> CLICK\n[button] CONTINUE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.714, 0.259, 0.87, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5631", "image": {"bytes": "", "path": "./images/28d54466-de85-45e6-9649-2575d38adfd4_dffa71c7-91d4-4a03-8642-c9af8bc7a05c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse documentaries streaming on Netflix.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK\n[button] Filter -> CLICK\n[button] Documentary -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.334, 0.639, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5632", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_fa28ad4c-66b5-46f6-8cdb-9f52c1ef5404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK\n[label] Dome -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.494, 0.488, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5633", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_5d992829-9076-470c-9e36-dd1dd1918ccd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.104, 0.713, 0.115] in the image\nAnd my action is Action: TYPE\nValue: berlin"}]}, {"id": "mind2web_5634", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_3f4f7403-be24-4fc0-a33a-961f7dc478a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK\n[option] Sales & Ad Ops -> CLICK\n[textbox] Locations -> CLICK\n[option] US, CA, San Francisco -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.272, 0.472, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5635", "image": {"bytes": "", "path": "./images/36f2e415-356b-4e44-9ab1-6c3054d4de1f_6a8d7913-dea5-453b-b17d-6652f31792b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if cars are available for tomorrow at Elevated Escape in North Carolina.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Elevated Escape\n[div] Elevated Escape -> CLICK\n[div] Tue, Mar 28 -> CLICK\n[checkbox] 24 March 2023 -> CLICK\n[div] Sat, Mar 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.32, 0.627, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5636", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_e9377db7-e0c7-4d52-b555-c18621895092.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.103, 0.519, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5637", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_61810484-2d4f-4d88-b9b3-25dc95d9719b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.222, 0.452, 0.234] in the image\nAnd my action is Action: TYPE\nValue: MUMBAI"}]}, {"id": "mind2web_5638", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_a77dd692-2148-4a62-8cf9-b62a855abf40.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.274, 0.941, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5639", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_2da08de7-e183-43a5-850b-1b41d9cdf907.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.26, 0.375, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5640", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_4a8b112d-25b2-4430-8ca3-275372e7ecbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email (required) -> TYPE: buckeye.foobar@gmail.com\n[checkbox] Career opportunity Career opportunity -> CLICK\n[checkbox] Office location Office location -> CLICK\n[checkbox] Company success Company success -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.531, 0.829, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5641", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_1ee44a3e-5f91-4a34-bbed-16b7b4fbb81d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] 31 -> CLICK\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.225, 0.84, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5642", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_fd6c3519-38ba-4091-be70-5c82a7f542f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK\n[span] Tomatometer -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.305, 0.805, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5643", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_bd359ca1-6647-4b90-9465-583fbc71a119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.047, 0.664, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5644", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6d3daffd-e582-43ec-9bde-6823e140ab89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[link] Toys -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.086, 0.08, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5645", "image": {"bytes": "", "path": "./images/783d5a91-5f7e-4184-8467-e7ea88e81c5b_13ed3791-9c2f-4f2c-a0e5-2d2a472e1fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Set the 3rd top hip hop track as my current obsession\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.331, 0.801, 0.49] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5646", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_65751985-a337-44ea-92ee-e6539bda7fd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.161, 0.158, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5647", "image": {"bytes": "", "path": "./images/a5c1095b-bba1-4029-8b8d-fa5848702827_ffd31f7b-a7cf-4c8b-9a96-5ca46768637c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City from June 5th to 9th.\nPrevious actions:\n[i] -> CLICK\n[i] -> CLICK\n[gridcell] 5 -> CLICK\n[span] Jun 5 -> CLICK\n[gridcell] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.09, 0.959, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5648", "image": {"bytes": "", "path": "./images/db289bef-3d18-43c6-8ee3-a1ebc5d285b4_ba175789-2fbd-4694-80a4-dc507e353aae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me news about the ps5.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.198, 0.677, 0.231] in the image\nAnd my action is Action: TYPE\nValue: ps5"}]}, {"id": "mind2web_5649", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_d1c03c4f-03c1-42df-a1eb-752d2d674a7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[link] TICKETS -> CLICK\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK\n[input] -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.877, 0.175, 0.965, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5650", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_f36f8381-72f0-49e3-b691-c855827719b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city\n[option] Mexico City - Benito Juarez Intl, MX (MEX) Mexico -> CLICK\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.475, 0.331, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5651", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_7ff13337-9f2f-4ca3-874a-76cacb179479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.37, 0.508, 0.492, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5652", "image": {"bytes": "", "path": "./images/90bd64ec-d48e-4796-a4e2-c46b866093c2_e1c57852-be09-49d4-b6c5-8b08ffa4dbc1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for newest girls' training leggings in size yxl and save the top three results.\nPrevious actions:\n[menuitem] Kids -> HOVER\n[menuitem] Pants & Leggings -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.284, 0.194, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5653", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_8b009b24-ae1b-40ce-b188-25c36447b588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.261, 0.455, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5654", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_f86868ba-77b6-40ce-afe6-ec0cdbf31f08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[listitem] Bay Shore -> CLICK\n[searchbox] To -> TYPE: Breakneck ridge\n[span] Breakneck Ridge -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 23, Thursday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.253, 0.727, 0.277] in the image\nAnd my action is Action: SELECT\nValue: 8"}]}, {"id": "mind2web_5655", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_e8dcebbf-9804-4061-9c08-d4008deb715e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[span] Select store -> CLICK\n[link] Flatware -> CLICK\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.062, 0.969, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5656", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_5b3eb865-638b-48aa-8415-2acfb4905ade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[button] Search flights + cruise External Link should open ... -> CLICK\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.716, 0.238, 0.972, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5657", "image": {"bytes": "", "path": "./images/4f395aad-6f10-4055-932a-d2af443e6bfa_8dada5a6-6c79-452b-9908-de98a25c6f5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Star Wars The Mandalorian statue and add to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.006, 0.613, 0.023] in the image\nAnd my action is Action: TYPE\nValue: Star Wars The Mandalorian statue"}]}, {"id": "mind2web_5658", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_93ed2d34-334e-4c25-9bdd-b1ed285fdd11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK\n[span] 12 -> CLICK\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.295, 0.686, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5659", "image": {"bytes": "", "path": "./images/4c3b2a4f-4b54-4724-9d8a-0ea95ab180c5_fe665efe-5d42-48d3-ae92-66c30e8134ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find forum threads which include the name Taylor Swift in the title within last month\nPrevious actions:\n[link] Forums -> CLICK\n[link] Advanced search -> CLICK\n[textbox] Search by keyword -> TYPE: Taylor Swift\n[combobox] Find Posts from -> SELECT: 1 Months Ago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.412, 0.223, 0.427] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5660", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_247a11f8-00a7-4f2d-a549-c4bafb74faf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.006, 0.789, 0.026] in the image\nAnd my action is Action: TYPE\nValue: Union City Nj"}]}, {"id": "mind2web_5661", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_3c7bf6a2-a75f-434d-8bc2-e34824e43dbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[tab] Flights -> CLICK\n[combobox] Flying from -> TYPE: London\n[option] Destination London -> CLICK\n[combobox] Flying to -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.139, 0.792, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5662", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_21d16a48-dcdb-4226-92ba-31ea01da9118.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado\n[button] Activity -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.191, 0.154, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5663", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_43e06d15-4af3-477d-8d5a-2be93ca570e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.043, 0.448, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5664", "image": {"bytes": "", "path": "./images/ef23fbf3-f05e-41e2-b847-a27028f42470_6d29058c-c968-4817-a15a-99a4667e39f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me \u201cpizza\u201d restaurants near Atlanta\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.13, 0.223, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5665", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_39e9097a-7ac8-4543-8ad6-91b40f932b34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] TRIP PLANNER \uf0da -> CLICK\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK\n[li] Cheyenne, WY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.41, 0.43, 0.578, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5666", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_1654a37a-cc71-4bac-88f3-efe73a2675f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.454, 0.686, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5667", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_f74e6f89-7fd4-4a49-9204-750b69c96b67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK\n[img] Tesla Shop Gift Card -> CLICK\n[textbox] Name of Recipient -> TYPE: April May"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.339, 0.929, 0.356] in the image\nAnd my action is Action: TYPE\nValue: april.may@gmail.com"}]}, {"id": "mind2web_5668", "image": {"bytes": "", "path": "./images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_dcf6e978-dc7f-436d-80c6-2f8ad9445bcf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of Broadway events sorted by date.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.051, 0.17, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5669", "image": {"bytes": "", "path": "./images/27a8bf82-2e7e-48c3-8fb1-66909c13d585_1ae1d6c7-b4d4-4b78-a3f9-5e6974eb5bde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Buick for sale within 250 miles from 26807.\nPrevious actions:\n[button] Cars & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.026, 0.83, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5670", "image": {"bytes": "", "path": "./images/e6c7934b-6949-46ee-8f72-edb46abd3da7_2b807397-c070-4c9f-9438-75fe88d865d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the winning odds in the Champions League for Real Madrid against Chelsea.\nPrevious actions:\n[span] Odds -> CLICK\n[heading] SOCCER -> CLICK\n[heading] ENGLISH PREMIER LEAGUE -> CLICK\n[link] UEFA CHAMPIONS LEAGUE -> CLICK\n[heading] GAMES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.197, 0.846, 0.211] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5671", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_e0790ebc-b02a-4b78-abb2-ec03cf320458.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[button] Substract one Adult -> CLICK\n[button] Substract one Child -> CLICK\n[generic] Economy -> CLICK\n[option] Premium economy -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.402, 0.636, 0.441] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5672", "image": {"bytes": "", "path": "./images/779cec8e-eef5-4de8-a42e-b449363664df_05b42c16-2b58-423e-9f0d-c4ef3203b528.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a theatre near 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.147, 0.888, 0.173] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_5673", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_07eaf7cf-bc33-4fd1-9e7a-5b4c915112c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.193, 0.335, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5674", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_a0620450-f297-4f91-9643-1324d3373687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? -> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK\n[button] 1 adult \u00b7 0 children \u00b7 1 room -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.588, 0.301, 0.811, 0.319] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5675", "image": {"bytes": "", "path": "./images/b5b56e9a-afef-4d1a-bd62-dddc058fdc81_2b8696c1-be11-47de-af3c-141664f86b58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an engineering open job in Madrid, Spain, if found apply with my details after picking office location, company success and career oppurtunity as applying reason. My name is James Smith and email is buckeye.foobar@gmail.com\nPrevious actions:\n[checkbox] Spain (18 items) -> CLICK\n[checkbox] Madrid (2 items) -> CLICK\n[link] Title: IT Support Engineer -> CLICK\n[textbox] First Name (as per passport) (required) -> TYPE: James\n[textbox] Last Name (required) -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.569, 0.92, 0.591] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_5676", "image": {"bytes": "", "path": "./images/29fde741-075c-446c-8e8a-f432e81e7ac0_2b73cbf1-4de3-4555-9070-0c329cd919b7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate vehicle price with a challenge credit score, monthly payment of $250 with a $3000 down payment and 24% APR for 48 months in Tennessee.\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.257, 0.656, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5677", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_552ceafa-2cc7-46fc-a178-1ffd27f5ef89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK\n[button] Continue with this address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.692, 0.2, 0.716] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5678", "image": {"bytes": "", "path": "./images/b5d47068-b773-4061-b7ba-17bd25e88e06_d0560cf3-9b90-4a29-a8b8-08577ec0c19b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Save two deals and offers for US.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.038, 0.598, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5679", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_464ec933-1237-478f-a390-08e1168b4498.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\n[combobox] Search -> TYPE: frosthaven\n[link] Frosthaven (2023) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.144, 0.773, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5680", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_e1fe0472-1d20-446d-a70d-80ff72131b1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.152, 0.568, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5681", "image": {"bytes": "", "path": "./images/54d60a7c-f52d-4d79-b879-34698507e22c_66342372-6689-4084-8008-5bdd51746855.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip from Albany to New York leaving Mar 18 and returning Mar 19 for 3 people\nPrevious actions:\n[input] -> CLICK\n[gridcell] March 18, 2023 -> CLICK\n[input] -> CLICK\n[gridcell] March 19, 2023 -> CLICK\n[spinbutton] How many travelers? -> TYPE: 3"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.124, 0.919, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5682", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_93b6ddac-92d1-4133-9adc-86d8ce49f9d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[link] SEARCH CARS -> CLICK\n[div] Best match -> CLICK\n[label] Nearest distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.263, 0.249, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5683", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_afaeea13-bdcc-4dfb-820b-5ca847f3103e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 1 weekly charts ranked artist\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.005, 0.838, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5684", "image": {"bytes": "", "path": "./images/f4623be1-31c6-4546-a567-92bfd1da9cd7_b845c300-477d-4935-97cc-1ea84ec96398.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Upgrade the count of the current SSD in my cart to 10\nPrevious actions:\n[link] Shopping Cart -> CLICK\n[textbox] qty -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.48, 0.147, 0.523, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5685", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_12c1b3bf-3325-4612-9902-b097acc4a6e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 1 -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.183, 0.3, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5686", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_489a9668-fabb-4591-aa4f-a235753a96fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[button] Browse Vehicles -> CLICK\n[checkbox] Trucks -> CLICK\n[combobox] SORT BY -> SELECT: Low to High\n[checkbox] 4+ -> CLICK\n[button] Select Chevrolet Colorado Vehicle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.799, 0.163, 0.951, 0.197] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5687", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_a9d256e6-2222-4953-8e33-4444408df4ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK\n[button] Close -> CLICK\n[button] First from $722 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.82, 0.236, 0.945, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5688", "image": {"bytes": "", "path": "./images/7180c4d1-971b-418d-8a64-87ab0d29c20e_610a704a-42dd-43c7-b6f6-8800697dc2d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest rent truck for 4 people, pick up from JFK airport at 11 am on March 27 and return at noon on March 30.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.216, 0.533, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5689", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_2d2144f4-0a3d-482e-95f0-7f07ca0bbf5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.172, 0.259, 0.195] in the image\nAnd my action is Action: TYPE\nValue: BWI"}]}, {"id": "mind2web_5690", "image": {"bytes": "", "path": "./images/6407babe-fe48-492a-8211-1f22a81b9ac0_b5b09d80-00ed-4295-82de-d967b31efaa5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find men's running shoes of size 9 with lowest price and add to bag.\nPrevious actions:\n[link] Running -> CLICK\n[div] Size -> CLICK\n[link] 9 -> CLICK\n[span] Now Trending -> CLICK\n[li] Price (Low - High) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.215, 0.495, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5691", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_86abe01d-3a02-4f50-86be-bc8454ad2f8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK\n[span] Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.342, 0.466, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5692", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_342bae0c-0a5a-4040-8b4e-238906800a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK\n[button] Food & Drink -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.341, 0.262, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5693", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_aa68d1df-0533-407f-b2e3-6f8118babb0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.254, 0.625, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5694", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_2bb85139-e8e3-45ca-8e49-d99aea4df215.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.197, 0.71, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5695", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_5d09b93b-3839-4e67-83bc-9cfde7194124.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Add to cart\u2014$799.95 -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue without membership -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.298, 0.462, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5696", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_58359ff4-73dd-45ee-b703-026fd4666acf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. 
Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.262, 0.748, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5697", "image": {"bytes": "", "path": "./images/f5d4d405-9714-47a5-a66a-49a899b16b97_6ac79817-591c-4763-b856-e3a201786417.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check all available tickets within $100 to $200 for the New York Knicks first game on April2 nd.\nPrevious actions:\n[link] sports. -> CLICK\n[textbox] Search by Name -> TYPE: New york knicks\n[p] New York Knicks -> CLICK\n[link] View Tickets -> CLICK\n[span] (0) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.872, 0.435, 0.972, 0.468] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_5698", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_77f619a4-6625-416b-823f-da4c81e06018.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\n[link] Surge Protectors -> CLICK\n[link] 6 to 8 Outlets -> CLICK\n[link] Under $25 -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] New -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.832, 0.088, 0.866, 0.095] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5699", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_6564754c-cf0b-4099-a320-28dabde5f587.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[label] One way -> CLICK\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.191, 0.568, 0.208] in the image\nAnd my action is Action: TYPE\nValue: Washington"}]}, {"id": "mind2web_5700", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_85de1ef5-b340-4275-924e-4ad340d35a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.007, 0.189, 0.024] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5701", "image": {"bytes": "", "path": "./images/1bf4f465-99cb-483b-aac1-a7512b150755_f6e5cdcb-7b34-4d49-9c97-e74cb6428e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hydropack and make the results to show only items that have a capacity of 21 to 35L.\nPrevious actions:\n[button] Camp & Hike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.24, 0.21, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5702", "image": {"bytes": "", "path": "./images/01bede1e-91ec-4fe0-a6bc-173826497800_3740da81-2f79-42c0-be33-7f148bf3f1d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a person by phone number 123456789.\nPrevious actions:\n[link] Find People -> CLICK\n[link] BY PHONE\u00a0NUMBER -> CLICK\n[input] -> TYPE: 123456789"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.225, 0.389, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5703", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_d7504220-9487-4929-8b6f-608bf6883e93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.236, 0.609, 0.261] in the image\nAnd my action is Action: TYPE\nValue: Directors"}]}, {"id": "mind2web_5704", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_15235466-cb9e-45a0-baf0-c2715e127ad9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[button] Continue -> CLICK\n[textbox] *First Name -> TYPE: John\n[textbox] *Last Name -> TYPE: Mark\n[textbox] *Email Address -> TYPE: Johnmark@gmail.com\n[textbox] *Phone Number -> TYPE: 234567890"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.436, 0.566, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5705", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_7470ad8d-b2a2-4965-827b-7a794991454e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Unlock deals External Link should open in a new wi... -> CLICK\n[div] Search location -> TYPE: TEXAS CITY\n[div] Texas City, TX -> CLICK\n[textbox] Select a date range. -> CLICK\n[img] Right -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.315, 0.62, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5706", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_1ef99a0e-af72-404c-a371-1815204eea54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> CLICK\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK\n[textbox] Find -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.015, 0.335, 0.025] in the image\nAnd my action is Action: TYPE\nValue: mexican"}]}, {"id": "mind2web_5707", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_2a60c96b-b69a-4763-9e83-c7ad02c58d8f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.345, 0.397, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5708", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_19088135-f0c7-424d-8c4b-c28c88f3c7db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.121, 0.891, 0.145] in the image\nAnd my action is Action: SELECT\nValue: noon"}]}, {"id": "mind2web_5709", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_83ca13ff-29f2-4738-bcd2-859f003ae40d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.005, 0.67, 0.023] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5710", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_00b8010a-a12a-4481-b961-a21322bb3972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.21, 0.752, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5711", "image": {"bytes": "", "path": "./images/8f567f79-e197-4d7e-9a49-877daae6dde5_eebe61c3-9d1e-450d-a54c-0e428e7c9dd9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Blue iPhone 12 Pro 128gb and add to cart.\nPrevious actions:\n[combobox] Search for anything -> TYPE: iPhone 12 Pro\n[button] Search -> CLICK\n[link] 128 GB - apply Storage Capacity filter -> CLICK\n[heading] Apple iPhone 12 Pro - 128GB - All Colors - Unlocke... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.458, 0.244, 0.76, 0.256] in the image\nAnd my action is Action: SELECT\nValue: Blue"}]}, {"id": "mind2web_5712", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_df559d9e-9ae9-42b5-833d-1268b513e3db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: ohio"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.137, 0.42, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5713", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_cdab6084-b5e6-4dc2-908a-907ef2e36ce8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[link] SHOP HOTELS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.127, 0.42, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5714", "image": {"bytes": "", "path": "./images/24de7f7d-f3f0-41a6-a578-e35b513b46e5_d4441fd5-a932-4be8-9301-89a7764372d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Maytag electric dryers that are at least 7.3 cubic foot costing less than $1000 that are available in store at Montgomery, Illinois with zip code 60538.\nPrevious actions:\n[checkbox] 7.3 cubic foot (5) -> CLICK\n[checkbox] 7.4 cubic foot (6) -> CLICK\n[span] Price -> CLICK\n[img] expand -> CLICK\n[spinbutton] Enter Minimum Price -> TYPE: 0"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.403, 0.121, 0.434] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_5715", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_a8b1678d-bc7c-412c-b9ad-f628c66d0f63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.2, 0.077, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5716", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_84cfe885-5fe0-4f65-bebd-c0f56bf02c16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK\n[span] -> CLICK\n[textbox] From , required. 
-> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.211, 0.492, 0.241] in the image\nAnd my action is Action: TYPE\nValue: ewn"}]}, {"id": "mind2web_5717", "image": {"bytes": "", "path": "./images/d0ce3db1-720b-4333-a487-2bdc8fd74a80_80261cdf-aaba-4a97-976e-a2d72d013c4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show episodes of Alien Worlds and check where to watch Episode 4 of Season 1.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.22, 0.594, 0.246] in the image\nAnd my action is Action: TYPE\nValue: Alien Worlds"}]}, {"id": "mind2web_5718", "image": {"bytes": "", "path": "./images/2a45ede7-6449-40b2-b032-149ddbe205a9_6dfde72b-7747-444c-835b-2feaf91878ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Contact Yeah You Right Events who are the organisers of the Food Fight 2023 event being held in New Orleans on May 25th to ask a question about the event\nPrevious actions:\n[combobox] autocomplete -> TYPE: New Orleans\n[div] LA, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.309, 0.683, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5719", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_08739645-479a-4ec6-8ace-d73e2de59ebe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.197, 0.463, 0.204] in the image\nAnd my action is Action: TYPE\nValue: Paris"}]}, {"id": "mind2web_5720", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_7e7219df-cb90-454e-aeb9-988780eced12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[button] PLAN TRAVEL -> CLICK\n[link] Flights -> CLICK\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.341, 0.29, 0.363] in the image\nAnd my action is Action: TYPE\nValue: JFK"}]}, {"id": "mind2web_5721", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_b716dec6-b13d-4e4c-bfbb-96a9fbd930ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK\n[span] View all 20+ items -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.823, 0.16, 0.969, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5722", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_4b07f157-afb9-41cc-bc51-78f88a227dfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[textbox] Find -> TYPE: thai restaurants\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.2, 0.066, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5723", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_a2e74740-9137-4289-afcf-e7975501f39d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[button] CHECKOUT -> CLICK\n[button] Pick up in store Shipping: Free -> CLICK\n[searchbox] City, State, or ZIP code -> TYPE: 10005\n[svg] -> CLICK\n[label] UNIQLO SOHO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.571, 0.325, 0.618] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5724", "image": {"bytes": "", "path": "./images/351568c6-452d-4f32-9375-2b6301f0cb36_18ed8582-6390-4c5e-834d-a8c52a81fd04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the next available subway train to Grand central station, NY departing from Queensboro plaza station, Long Island, NY.\nPrevious actions:\n[listitem] Grand Central Terminal, East 42nd Street, New York... -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[label] Bus -> CLICK\n[label] Express Bus -> CLICK\n[label] Rail -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.332, 0.848, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5725", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_c8172921-931f-4897-badb-a46e41361d4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: Chicago\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.607, 0.458, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5726", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_d27c83d7-a7bf-4035-be1a-7cfe70abd291.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2\n[combobox] Minute -> TYPE: 30\n[combobox] AM or PM -> SELECT: PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.46, 0.353, 0.487] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5727", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_79c44cf2-97a2-4876-8e7d-99b6d5b1855d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Landscaping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.017, 0.564, 0.027] in the image\nAnd my action is Action: TYPE\nValue: WEST HOLLYWOOD"}]}, {"id": "mind2web_5728", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_00e89ec7-a6d4-4c75-ae50-335ba459f64d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[button] 4K -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.518, 0.429, 0.533] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5729", "image": {"bytes": "", "path": "./images/0592744b-ea69-4724-80f8-3924916b7758_e85650b5-205b-4c62-8430-558ab5a7a477.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out the cancellation policy\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.01, 0.659, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5730", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_b2fd1297-19ee-4a76-89d7-39842b79a223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.129, 0.568, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5731", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_95c8c608-6806-4dc5-95c1-ebad7ad6b1b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.174, 0.359, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5732", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_f5cda11c-d548-456b-a605-5b5857a87848.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, Sheffield selected. -> TYPE: manchester\n[span] Manchester -> CLICK\n[radio] Return -> CLICK\n[textbox] Date use format: 29-Mar-23 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.179, 0.501, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5733", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_bc3b63a8-06fc-4da6-a5d0-8a80cec2bdc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK\n[link] Neutral (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.241, 0.986, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5734", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_0f19dcc1-254d-4a15-a862-941158d86dde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[gridcell] Friday, April 14, 2023 -> CLICK\n[combobox] Hour -> TYPE: 12\n[combobox] Minute -> TYPE: 00\n[combobox] AM or PM -> SELECT: PM\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.342, 0.874, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5735", "image": {"bytes": "", "path": "./images/cd92cea9-a60f-4e09-90d7-32384ccdd61c_e2a55480-187b-4c9f-8f3a-28a19a3c7931.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show most played games based on daily players.\nPrevious actions:\n[link] New & Noteworthy -> HOVER\n[link] Most Played -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.372, 0.144, 0.548, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5736", "image": {"bytes": "", "path": "./images/a10e6232-923d-4635-bdc8-c6d989380e45_be9d5c1e-0aa0-43d7-8dff-c3ea5a77d4f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a blue iPad to your cart and select the option for free engraving with \"hello world\" with no other accessaries.\nPrevious actions:\n[link] iPad -> CLICK\n[link] Buy iPad 10.9-inch -> CLICK\n[img] Blue -> CLICK\n[span] for 12 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.389, 0.938, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5737", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_d05fb7e9-a294-4741-a6df-073f2cd22866.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK\n[link] Bundle info -> CLICK\n[link] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.441, 0.467, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5738", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_ac2eb761-67e0-413a-8388-b0e85e06601f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte\n[div] Belo Horizonte, Minas Gerais, Brazil -> CLICK\n[tab] Flexible dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.292, 0.471, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5739", "image": {"bytes": "", "path": "./images/b7e501a4-5dee-406e-b6ba-00eed783df0c_5aa3d6d6-e7b6-429c-815d-85b1df8eaab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a KOA campground in California that has RV sites.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.198, 0.771, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5740", "image": {"bytes": "", "path": "./images/92869590-839d-4dfd-8884-4c52bef3b328_c5ef9f3a-728c-4744-bc1d-d112a9d73d99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pasta restaurant in Sydney and save it\nPrevious actions:\n[button] Location Columbus, OH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.102, 0.47, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5741", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_0af4d0bf-3b98-45b8-b7a4-a0c99d68398c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.162, 0.443, 0.192] in the image\nAnd my action is Action: TYPE\nValue: 12345678912345"}]}, {"id": "mind2web_5742", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_c6c213c6-e9c9-4ebd-b779-19fd733f7453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.206, 0.5, 0.237] in the image\nAnd my action is Action: TYPE\nValue: 10023"}]}, {"id": "mind2web_5743", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_350fd79e-7572-4e46-b13c-7bb569bebc81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. 
Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Log in later -> CLICK\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.671, 0.934, 0.737] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5744", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7624a77c-25cb-456d-96d6-a8f4841f7a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.101, 0.914, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5745", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_f5afe3cb-9632-40a4-a9e5-07bb9894e599.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> ENTER\n[strong] Filters -> CLICK\n[checkbox] \uf0e7EV Charging -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.282, 0.328, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5746", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ad396ee3-8490-4f70-9196-6da9a1d68166.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.311, 0.693, 0.331] in the image\nAnd my action is Action: TYPE\nValue: 150"}]}, {"id": "mind2web_5747", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_9a76e93b-f2bc-4cad-ab30-cd3ffbbd96c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.185, 0.792, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5748", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b44a010e-fd15-4659-a3e5-e01c7fd86c81.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[tab] Rail Passes -> CLICK\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.35, 0.554, 0.364] in the image\nAnd my action is Action: TYPE\nValue: NEW YORK"}]}, {"id": "mind2web_5749", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d13147e2-afaf-4608-bd27-65d8b4520f52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.0, 0.465, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5750", "image": {"bytes": "", "path": "./images/60e7ffd3-dda8-4ec6-9c10-115a5ab1bf90_8a3768a1-8b90-4fe8-ad47-a109170ea6c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the latest news from rotten tomatoes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.79, 0.064, 0.82, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5751", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_4ac7cf83-cd41-4c71-b91e-f48fa542319a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\n[textbox] Near -> TYPE: Concord\n[span] Concord -> CLICK\n[textbox] Find -> CLICK\n[textbox] Find -> TYPE: mexican\n[span] Mexican -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.532, 0.709, 0.631, 0.736] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5752", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_82d66396-a339-49bf-94d7-e088d54ab356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK\n[input] -> TYPE: 50000\n[input] -> TYPE: 10000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.333, 0.473, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5753", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_e499aff5-fada-4a43-a168-d2465e48c36f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[link] BABY -> HOVER\n[tab] Toddler (6M-5Y) -> CLICK\n[link] All Toddler (6M-5Y) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.236, 0.277, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5754", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_4e1fe0be-da2a-4005-8084-67028e46af25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[span] Locksmiths -> CLICK\n[textbox] Near -> TYPE: SAN FRANSISCO\n[span] San Francisco, CA -> CLICK\n[button] All -> CLICK\n[radio] Key extraction -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.519, 0.048, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5755", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_51aa7294-0daa-44c7-adc5-04a136b43a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.373, 0.153, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5756", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_1755a651-6a6c-470d-8c28-8470e6038b82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK\n[textbox] When? -> CLICK\n[link] 17 -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.114, 0.902, 0.164] in the image\nAnd my action is Action: SELECT\nValue: 4 Guests"}]}, {"id": "mind2web_5757", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_ce4602ee-4097-4c4d-a52e-dd181d2ca5eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\n[link] Careers -> CLICK\n[link] See all open MTA positions. -> CLICK\n[textbox] Enter a Location -> TYPE: brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.675, 0.493, 0.743, 0.536] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5758", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_073152ad-a25b-4229-b88e-710c06a9e4cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\n[link] Jets signing former Packers QB Boyle to 1-year dea... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.26, 0.768, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5759", "image": {"bytes": "", "path": "./images/6b4aa5a9-79c5-49a3-9cc6-ead02159a3dd_92ef851f-5e36-4b3b-826d-730a35f6816d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Queen size memory foam mattress with lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.029, 0.045, 0.06, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5760", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_ae3dbbe1-426e-4fff-9667-43fe2d1f382e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK\n[button] Preferred flight class Economy -> CLICK\n[menuitem] First class -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.207, 0.931, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5761", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_ca703888-1346-4c10-af36-2ecd3a7f5fcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Price -> CLICK\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.007, 0.988, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5762", "image": {"bytes": "", "path": "./images/f27bb47e-3aba-439a-b98a-2cf9b0e25e9c_b1062855-4c2b-4283-9b44-d7dc68373578.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Tokyo's best tea related attractions in the morning.\nPrevious actions:\n[span] Explore the World -> HOVER\n[span] Tokyo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.233, 0.104, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5763", "image": {"bytes": "", "path": "./images/eeeee9d8-5387-46bc-a741-5bcb66b46f31_ab08133d-0fb2-4fe2-abe7-fc145167b9b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a job opening in sales in San Fransisco, and if found, apply for the job.\nPrevious actions:\n[link] Our Team -> CLICK\n[menuitem] olink -> CLICK\n[link] See Open Roles -> CLICK\n[textbox] Select Department -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.258, 0.25, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5764", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_fb6ad166-2b4e-4439-bcb9-4024694fe8fc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.295, 0.35, 0.322] in the image\nAnd my action is Action: TYPE\nValue: belo horizonte"}]}, {"id": "mind2web_5765", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_05bed7b6-3573-4132-93c1-7cfe12b02c17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[combobox] Pick Up Time -> SELECT: 11:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[select] June 2023 -> SELECT: June 2023\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 1:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.391, 0.567, 0.431] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5766", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_da3ddac9-4749-42d8-9fab-3cd56b1ac44a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[span] New Orleans, Louisiana, United States -> CLICK\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.744, 0.083, 0.881, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5767", "image": {"bytes": "", "path": "./images/930803d7-4032-4144-89a2-e44f3c5c9ccf_2bfdc2d3-8e60-435d-9e21-c63207b3c90d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy tickets for a group at Hurricane harbor Phoenix on April 22\nPrevious actions:\n[span] Hurricane Harbor Phoenix -> CLICK\n[button] Go! 
-> CLICK\n[link] Groups \ue92e -> CLICK\n[span] Buy Now -> CLICK\n[button] April 22, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.099, 0.464, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5768", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_336e6fd2-269d-493a-b7dc-c6c145b02503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[textbox] Going to -> TYPE: changi\n[button] Singapore (SIN - Changi) Singapore -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.176, 0.568, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5769", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_ed54258d-f01a-4eb0-8b28-5c6b95d348fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lave Hot Springs East KOA\n[list] KOA Logo Icon Lava Hot Springs West KOA Holiday La... -> CLICK\n[button] FIND A KOA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.487, 0.49, 0.541, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5770", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_235eb14d-9210-4c53-a3d1-0afe2b3c737a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.124, 0.782, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5771", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_ed212c71-ebb3-483a-8e55-dee589fad20b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.599, 0.35, 0.614, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5772", "image": {"bytes": "", "path": "./images/d1de3d1a-3df1-4421-98f3-f8d078752893_14af7c4c-eb5c-4ec0-bb9f-33a24e6fcc22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest cab with GPS and unlimited e-toll, and pay now for an option for a military veteran with pick up from the nearest location to Nevada City, California, on June 6, 11 am, and drop off at the exact location on June 10, 1 pm.\nPrevious actions:\n[link] Military & Veterans -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: nevada city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.268, 0.209, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5773", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_0c859da4-62dd-45c1-9935-aad323de8426.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.275, 0.446, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5774", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_b0cd091e-32b2-4506-9cb9-8259c8d63ce5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK\n[div] On the Water -> CLICK\n[label] Up to 1 hour -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.319, 0.105, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5775", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_cdddded4-b437-467d-99c4-8f76f89e0aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK\n[select] Most popular -> SELECT: Publication date, new to old\n[div] Age range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.267, 0.196, 0.292] in the image\nAnd my action is Action: SELECT\nValue: Ages 3-5 (31)"}]}, {"id": "mind2web_5776", "image": {"bytes": "", "path": "./images/bd32af6e-9fae-4968-b3e6-d143f148f5b8_313db28f-e14f-4a5d-af0a-7fca3e4fcd49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a copy of the Gorillaz first studio album.\nPrevious actions:\n[combobox] Search artists, albums and more... 
-> TYPE: gorillaz\n[link] Gorillaz Gorillaz Artist -> CLICK\n[link] Gorillaz -> CLICK\n[link] Buy a copy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.884, 0.155, 0.97, 0.174] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5777", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_329d8cfe-b9b5-4cb7-a9ed-bf622f9a3a98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] From -> TYPE: new york\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.129, 0.559, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5778", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_648bdb7d-6268-4937-afe4-50036e127c4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[button] Transit \uf0d7 -> CLICK\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.353, 0.417, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5779", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_b5988297-2c7c-4904-b027-838dccd562f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.905, 0.069, 0.956, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5780", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_78c3abc0-517b-4da4-b4eb-ce0788ed923a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[label] Pick-up location -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.121, 0.438, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5781", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_75bcd0ba-31a7-43c4-a6a9-c9eb75258065.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.019, 0.535, 0.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5782", "image": {"bytes": "", "path": "./images/d78e3aac-c01b-4ebb-957d-e70f8bb378f3_ad2ccb9e-f110-417f-97cb-e2595afe0dd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the design tool for a new home office.\nPrevious actions:\n[link] Design -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.053, 0.693, 0.245, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5783", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_bc8a2b0a-b824-4d31-996a-98da91c17d68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.156, 0.302, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5784", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_fa8985dc-a6b2-4b61-8ee1-b532dff08e13.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Check-in April 5, 2023 -> CLICK\n[button] Apr 3, 2023 -> CLICK\n[button] Apr 6, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] 1 room, 2 travelers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.262, 0.853, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5785", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_b8806818-06e9-467c-8a42-067311698bfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.604, 0.277, 0.632] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5786", "image": {"bytes": "", "path": "./images/fa2828c5-44b3-446e-ae42-a26438ed8343_974c432b-99eb-42e8-a5a5-9ff19f60d0bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the upcoming departures for the oak grove station going southbound direction\nPrevious actions:\n[combobox] Search MBTA.com -> TYPE: oak grove station\n[button] search -> CLICK\n[link] T orange line bus commuter rail Zone 1A Oak Grove -> CLICK\n[link] Go to route -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.514, 0.429, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5787", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_b0495a6e-1270-4d11-8868-2413bc8f1272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.182, 0.539, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5788", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_d02f058f-0877-48fe-bec7-bb51b808656a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.377, 0.173, 0.385] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5789", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_521ea3e1-7da1-4fd9-94f0-6d5eafd32fe4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.037, 0.106, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5790", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_a8a56cd5-cf3c-46a5-a241-47d55e04c119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK\n[link] Shirts & Tops -> CLICK\n[link] Graphic T-shirts -> CLICK\n[div] Sports -> CLICK\n[link] Football -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.567, 0.495, 0.593] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5791", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_37743c80-cdfa-45ca-8318-679da8952f30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.228, 0.145, 0.242] in the image\nAnd my action is Action: SELECT\nValue: UFC"}]}, {"id": "mind2web_5792", "image": {"bytes": "", "path": "./images/c577375b-ecca-42f8-920c-b06809eef2b4_851fc5a4-d856-4f18-9634-c1e1a0669314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the retired players named James Smith\nPrevious actions:\n[link] Players -> CLICK\n[link] RETIRED -> CLICK\n[textbox] Search by player name -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.377, 0.666, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5793", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_4349ee88-0a9c-44d8-b554-f4952ee742fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.196, 0.29, 0.227] in the image\nAnd my action is Action: TYPE\nValue: bhz"}]}, {"id": "mind2web_5794", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_67cfe42d-b9db-4b88-a753-af5ee18af657.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Mens -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.284, 0.233, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5795", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_8bb3b4e6-7581-4aca-ac35-060f04786c75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.419, 0.844, 0.456] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5796", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_7e8bf2cf-620f-4e0f-9a98-cb0a178f6cfd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) 
Florida, United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.088, 0.85, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5797", "image": {"bytes": "", "path": "./images/2a7f7732-ec9b-4d78-91ad-f0fe10bf7daf_6fad4f05-f655-4e45-b926-c773034e90c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming pc which can run Call of Duty Warzone at 4k.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[button] 4K -> CLICK\n[div] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.377, 0.703, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5798", "image": {"bytes": "", "path": "./images/fc8342f9-3e2e-4f59-b54e-b35cd7285fdb_b605b086-ec49-460a-ba68-c3117d5a3499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Purchase the classic bundle for the simulation game Cities Skylines.\nPrevious actions:\n[link] Categories -> HOVER\n[link] Simulation -> CLICK\n[link] CITIES: SKYLINES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.267, 0.231, 0.355, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5799", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_5e79823e-7c1a-455c-afb4-c9a536f0c4ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[button] Genre -> CLICK\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.614, 0.184, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5800", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_2682ad2f-8cd9-4b44-a3ac-40ed813b6192.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\n[textbox] Search IMDb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.28, 0.156, 0.298] in the image\nAnd my action is Action: TYPE\nValue: The Flash"}]}, {"id": "mind2web_5801", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_5a39e836-5fa1-4b38-b70d-d1191480b770.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.215, 0.165, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5802", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_d01a0aa8-14a8-454c-8544-dcc082a22324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Driscoll's Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_5803", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_38ec61e1-77aa-4f6f-9bfe-c062d0f80e62.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city\n[input] -> TYPE: 252-654-5258"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.495, 0.076, 0.769, 0.103] in the image\nAnd my action is Action: TYPE\nValue: thomas.neo@gmail.com"}]}, {"id": "mind2web_5804", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_149c287a-3fb6-4483-929b-aee42e6e4527.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[span] Add -> CLICK\n[span] Add -> CLICK\n[link] Herbs -> CLICK\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.015, 0.981, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5805", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_07e08472-af90-4513-b934-c8893026dfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[link] 22 -> CLICK\n[button] Continue -> CLICK\n[div] + -> CLICK\n[textbox] About Your Trip: -> TYPE: Wedding Anniversary\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.226, 0.711, 0.25] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_5806", "image": {"bytes": "", "path": "./images/4f069a59-c53b-4efe-bcfb-fc9fd864ea2e_c3c62fdc-7f5f-4b13-a9e0-2fce42f49db2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rare books and sort the results by auction end time.\nPrevious actions:\n[combobox] Search for anything -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.027, 0.665, 0.048] in the image\nAnd my action is Action: TYPE\nValue: rare books"}]}, {"id": "mind2web_5807", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_33cacba9-a4ac-459b-a7a0-8010dfef19e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[combobox] Sort by -> SELECT: Most recent\n[span] Analyst - Sales Programs -> CLICK\n[button] Apply Now -> CLICK\n[textbox] Email * is a required field. -> TYPE: jacksparrow@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.781, 0.132, 0.824] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5808", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_d4367241-a990-4cdb-909e-2a0e80135606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] PS5 Accessories -> CLICK\n[generic] Controller -> CLICK\n[span] Sony -> CLICK\n[button] APPLY -> CLICK\n[span] Free Shipping -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.318, 0.192, 0.328] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5809", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_a89299a2-03da-445f-bad6-2ab49df34fa8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian\n[b] Indian -> CLICK\n[button] Today -> CLICK\n[button] April 12, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.176, 0.542, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5810", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_e63e526c-0f1e-4a26-8fb7-bcdabb7c51d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.491, 0.829, 0.508] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_5811", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_ea03253e-d374-4d74-ad87-4190b34c30c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: Nintendo Switch Console\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.712, 0.147, 0.734] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_5812", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_94b84afe-117d-4fd2-a611-616055f7a86a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.36, 0.36, 0.375] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5813", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_89834bb1-075b-4540-8bbe-88224a51cb0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[svg] -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[svg] -> CLICK\n[button] Used for -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.106, 0.796, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5814", "image": {"bytes": "", "path": "./images/7c28f4e3-3dcf-477b-98c3-087c4fc16655_dc8e26e9-cdcf-4135-b829-4ef2137c2758.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store which is nearest to 10017 zip code.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] Postal code, address, store name -> TYPE: 10017\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.45, 0.362, 0.997, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5815", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_a1454cf5-5c62-4137-82c2-813f8dd4073c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\n[button] Cars & Services -> CLICK\n[link] Products & Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.28, 0.93, 0.471] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5816", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_de9053b8-703b-4782-a562-66e97a63276b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] Los Angeles -> CLICK\n[textbox] To -> TYPE: Miami\n[option] Mint. Miami area -> CLICK\n[button] Explore flights -> CLICK\n[span] 234 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.709, 0.196, 0.866, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5817", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_514bcfa3-e57a-4004-b98a-331a51bd1de1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.396, 0.347, 0.438] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_5818", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_3b6ab224-8bd0-4206-ada8-7e14e8308314.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] Confirm Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] ZIP Code (required) -> TYPE: 10001"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.448, 0.754, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5819", "image": {"bytes": "", "path": "./images/f7c2c65f-9415-4e1e-8ce7-a2e22ff2492c_cb175ec6-1b33-4e7b-a205-3e5fae52fd07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the quickest one-way flight from New York To Paris on June 4 for two adults with one carry-on and one check-in bag in business class.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: NEW YORK\n[span] All airports -> CLICK\n[textbox] Flight destination input -> TYPE: PARIS\n[span] All airports -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.294, 0.641, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5820", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_dfc898b1-8a15-4482-8e9f-563a8d77ae89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.155, 0.082, 0.208, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5821", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_07db340a-8bc6-410d-9856-4888318261b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[svg] -> CLICK\n[gridcell] 1 June 2023 -> CLICK\n[gridcell] 8 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.193, 0.157, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5822", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_6cb303c4-1ce6-481a-aea4-4579b0be918e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK\n[div] Canada -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.407, 0.243, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5823", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_76377685-cdd6-4780-bfd8-b03bd4dec0cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[span] Dublin -> CLICK\n[button] France -> CLICK\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.706, 0.208, 0.725, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5824", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_65009bac-a256-4768-969f-c64e4ac76638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK\n[button] Video Games & Consoles -> CLICK\n[link] Video Game Consoles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.902, 0.374, 0.955, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5825", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_6a5d4462-eb16-4b06-9b5d-e146aed21024.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.138, 0.248, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5826", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_1cba5090-1401-4ce5-ab29-6dbb9aaaac26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\n[button] SPORTS -> HOVER\n[tab] NFL -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.342, 0.068, 0.455, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5827", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_ce1dba77-1add-4cad-889f-7a90b54c5ccb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK\n[button] filter by services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.247, 0.744, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5828", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_3f558697-ab28-4d4e-b047-333054eb40cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.221, 0.981, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5829", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_0d3428c6-2527-45c7-9bb4-64c3bca723bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: NYC\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.225, 0.144, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5830", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_c88bb1ab-d1fe-4205-af84-9542a145f787.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\n[link] Hotel , Opens another site in a new window that ma... -> CLICK\n[searchbox] Type your destination -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.212, 0.409, 0.257] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5831", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_0bb44b72-6b4c-4892-a91f-d640f266ff44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK\n[heading] Resident Evil 4 - Xbox Series X -> CLICK\n[span] Digital -> CLICK\n[li] Deluxe -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.266, 0.975, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5832", "image": {"bytes": "", "path": "./images/fb9c0e7f-02dd-453f-8c73-da19282abf84_4b195391-b9c2-4913-bfba-18cb820a9858.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ford Mustang with lowest price and save it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5833", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_aa09ebe7-4fa3-49cd-9fd2-84b5ead50fa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.747, 0.333, 0.944, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5834", "image": {"bytes": "", "path": "./images/f3bf7fe9-6987-4153-81b4-4b51fe25bbbc_112135b7-86d9-4229-9794-e472f3ca4544.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for rental cars in Seattle, WA and filter the results by the Best score.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Seattle"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.163, 0.573, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5835", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_61803cf9-7251-4771-ae40-e0694bec96e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... 
-> TYPE: Manchester\n[span] Manchester Piccadilly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.092, 0.326, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5836", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_cd74b537-1276-4126-bf2e-d2135aba47ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! -> CLICK\n[link] Plan Your Visit \uf078 -> CLICK\n[link] Park Policies -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.359, 0.106, 0.372] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5837", "image": {"bytes": "", "path": "./images/38fe67f7-14af-4259-8309-aa350abdc395_03b96773-9634-4493-b857-612a778193b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest two rooms near Kashi Vishwanath Temple in India for three adults and a 7-year-old kid from June 6 to 10 in any 3-star and up air-conditioned family hotel with a review score of at least 8, free internet.\nPrevious actions:\n[div] 8+ -> CLICK\n[div] 3 -> CLICK\n[checkbox] Free internet -> CLICK\n[button] Style -> CLICK\n[checkbox] Family -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.541, 0.226, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5838", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_ad4ae519-c48d-4921-9da8-b102cae1e64f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] 2010 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] 2023 -> CLICK\n[menuitem] 2017 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.321, 0.253, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5839", "image": {"bytes": "", "path": "./images/5307bed6-1664-4cf7-9e60-0f563f9fa4fe_e8ebcee6-59f0-4613-a5c7-fb120ac0a491.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel room for one traveler on April 3-6 that is closest to National University of Singapore and costs less than $500\nPrevious actions:\n[button] Going to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.195, 0.568, 0.254] in the image\nAnd my action is Action: TYPE\nValue: national university of singapore"}]}, {"id": "mind2web_5840", "image": {"bytes": "", "path": "./images/978760ca-7ec3-4b78-86c6-98b173c1a00e_72755349-6b54-4449-b255-f2560b342cae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow an artist from the chill category.\nPrevious actions:\n[searchbox] Search -> TYPE: Chill\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.169, 0.199, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5841", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_0f8922cc-c34b-40d7-a6f4-4c095f40a94f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240\n[button] UPDATE -> CLICK\n[checkbox] Pickup at Store Eligible (86) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.346, 0.132, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5842", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_39e0fe3d-64be-40eb-a9b5-65dcf8a97695.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.318, 0.344, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5843", "image": {"bytes": "", "path": "./images/dd057bda-33ea-40b8-9865-771242e22f40_33ed3481-1a77-422e-8dc8-adf0c11bec5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse fiction audio books sorted by lowest price.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Audio Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.49, 0.196, 0.515] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5844", "image": {"bytes": "", "path": "./images/f8027aa8-c193-47c8-a97b-432836093939_ebb6e2a1-73dd-4ef0-9dae-4f80fc30e110.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Las Vegas, NV that offer free airport shuttle service.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.17, 0.478, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5845", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_c6afb333-db4f-4c5e-a453-f71572c34a7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> TYPE: 04/19/2023\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: Heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.421, 0.393, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5846", "image": {"bytes": "", "path": "./images/a63b891b-4703-46ab-8633-b151b26574d1_00ea167e-ab9c-4cb5-ad27-3e2a9d4808c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the software requirements for iPhones that support live tv on demand streaming.\nPrevious actions:\n[link] Navigate to on-demand -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.192, 0.193, 0.312, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5847", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_847bd686-a068-494a-b37e-7d5679ff8cd4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[textbox] From -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] To -> TYPE: miami"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.224, 0.615, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5848", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_69323266-e943-4d87-a9a3-c38c6a97683d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.397, 0.161, 0.566, 0.181] in the image\nAnd my action is Action: SELECT\nValue: 8 00 PM"}]}, {"id": "mind2web_5849", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_03e8e495-0e91-49be-902c-3a0f659ec428.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK\n[span] Top 250 Movies -> CLICK\n[link] Sci-Fi -> CLICK\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.08, 0.628, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5850", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_18882ce8-7875-4663-93ec-0807ef95ce96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[link] MOVIES -> CLICK\n[svg] -> CLICK\n[select-label] Audience score (highest) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.179, 0.376, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5851", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_ec54679e-f5df-407b-abb0-a75b7fe45356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.107, 0.122, 0.123, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5852", "image": {"bytes": "", "path": "./images/a88676d0-c252-408f-b796-93c95f6b71fc_e8963296-becc-47f3-ad53-3d823ede9da4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open my trade offers.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.0, 0.589, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5853", "image": {"bytes": "", "path": "./images/6fd2a5e9-12e7-4f3c-bf1b-42702d56105e_f4d8f5c8-3590-4a21-a09a-085d4d732c2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that is related to the topics of Industry and also Laborer and Worker.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.141, 0.56, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5854", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_93da400b-38b2-4337-9b42-dae5b8caf0b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[span] -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. 
-> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.231, 0.687, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5855", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_a0404d19-6c64-4ba4-943c-303f416d93ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Browse Movies by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.176, 0.278, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5856", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_03ea992d-d5ed-4a7f-a6a3-1d66a15aec50.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK\n[link] 18 March 2023, Saturday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.476, 0.45, 0.536, 0.478] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5857", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_03b3771e-2a05-4a39-8770-852b2e28652c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.101, 0.56, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5858", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_5ad16225-5e2c-4a50-97f0-c7742c5bc261.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.668, 0.395, 0.699, 0.415] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5859", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_2a784ab8-38ce-492f-8942-69b903f33a57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.115, 0.777, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5860", "image": {"bytes": "", "path": "./images/0fc202d2-4c12-48ca-b04b-b667aac49156_44b4fb46-9ac1-4433-a49f-92c78000593a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse benadryl items for Allergy Treatments in Liquid form.\nPrevious actions:\n[combobox] Search products and services -> TYPE: benadryl\n[button] Search for benadryl -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.551, 0.143, 0.564] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5861", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_8b97f306-2eff-498a-8a45-2e113edfc5dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.198, 0.293, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5862", "image": {"bytes": "", "path": "./images/8aae9804-0765-4e14-ab71-4dc13ada32f7_2be5e5da-b142-4be7-9aca-8573136aa54e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find camping tents that can fit 6 people and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.06, 0.128, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5863", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_e4fe74f4-0455-4d58-a108-1d2820295a1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK\n[div] 27 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.463, 0.209, 0.496, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5864", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_5af0f370-33fa-497b-a075-0a6acbc1cb7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK\n[span] Most Reviewed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.519, 0.287, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5865", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_cb306aa7-a977-4b22-a191-4e7ff1683495.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.515, 0.454, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5866", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_69d92e43-3d66-42f1-b437-29280a51214b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.152, 0.007, 0.491, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5867", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_826632f8-5f08-404f-855b-b7b3374dfde3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a suuporing role ine 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK\n[link] 1990 -> CLICK\n[link] Denzel Washington -> CLICK\n[button] Expand Upcoming -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.457, 0.059, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5868", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_40f87c26-9448-4531-b356-b08bfe0e831d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.349, 0.342, 0.651, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5869", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_d06ad609-36ae-4f0b-8623-247fa123cbb0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[checkbox] 19\" Wheels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.117, 0.273, 0.133] in the image\nAnd my action is Action: TYPE\nValue: 60602"}]}, {"id": "mind2web_5870", "image": {"bytes": "", "path": "./images/cd8d723a-7a9f-4bd6-a8b9-75babb60835c_08a60925-4f62-45da-aa46-c69d90ef1915.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information on how to get a ticket refund.\nPrevious actions:\n[span] Help -> HOVER\n[div] Visit the help center -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.683, 0.607, 0.693] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5871", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_6cc4b85e-f193-43cb-a661-b6a4f7cb1c59.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK\n[button] SEARCH -> CLICK\n[span] Sort and filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.137, 0.766, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5872", "image": {"bytes": "", "path": "./images/4a9a05f8-6c23-46c5-bb38-eec63a477475_63c62cac-1560-44ee-baab-e349ce9a7fc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adults only, airfare included vacations in Mexico during the month of May\nPrevious actions:\n[textbox] What type of deals? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.193, 0.486, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5873", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_d4393929-345e-460d-859a-1600973ae800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.544, 0.169, 0.807, 0.189] in the image\nAnd my action is Action: TYPE\nValue: boston"}]}, {"id": "mind2web_5874", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_d571bd82-235d-4db6-a852-0ad4320383e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK\n[button] Locations -> CLICK\n[button] Germany -> CLICK\n[button] Posting Dates -> CLICK\n[button] Less than 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.338, 0.89, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5875", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_aac1c895-4aba-4a70-92e5-fcc5fb7e46e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.042, 0.746, 0.06] in the image\nAnd my action is Action: TYPE\nValue: Harry Potter"}]}, {"id": "mind2web_5876", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_563fa026-5ccb-4530-ba47-2733ea4e3f73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL\n[svg] -> CLICK\n[button] ADD TO CART -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.417, 0.953, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5877", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d4035166-f027-406d-a033-54f1537852f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Adults -> CLICK\n[button] Add Children -> CLICK\n[button] Add Age -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.151, 0.341, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5878", "image": {"bytes": "", "path": "./images/1655f54a-31e5-4dda-9089-7ccff35a1095_d9c7b18b-2aed-4aa2-9e8f-1a2cb9fc509c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trip ideas for US travels and book small size or full-size pacific coast highway road trip for a Vietnamese from Los Angeles International Airport on July 7, 1 pm pick up and drop off at the same location on July 15, 2 pm.\nPrevious actions:\n[link] Trip Ideas -> CLICK\n[div] Pacific Coast Highway Road Trip -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: LOS ANGELES\n[span] Los Angeles Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.589, 0.308, 0.691, 0.321] in the image\nAnd my action is Action: SELECT\nValue: July 2023"}]}, {"id": "mind2web_5879", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_4852e3fe-905f-4e27-9a12-35d97fabc229.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK\n[button] January 2024 -> CLICK\n[button] DURATION -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.623, 0.254, 0.861, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5880", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_f73bb3ec-e0ee-4c9a-88f4-067c971d74af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.193, 0.271, 0.217] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_5881", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_e049fd1b-420c-4d5a-8879-da5d9e7c436d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[textbox] Email address used to place your order * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Order number * -> TYPE: X123456\n[textbox] Date you received your order (DD/MM/YY) * -> TYPE: 08/04/23\n[textbox] Please cancel my order for the following products ... 
-> TYPE: Harry Potter Box Set\n[textbox] Reason for cancellation (optional) * -> TYPE: Not available at address"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.589, 0.759, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5882", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_05693c99-fd4d-4edb-8bc6-928ce06772f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.17, 0.26, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5883", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_9c2290bc-9528-494c-b4b2-6c24d402f0ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.164, 0.192, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5884", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_93d10c01-8038-4307-a588-04ff78151bb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\n[button] Find a Park by State -> CLICK\n[link] Alaska -> CLICK\n[link] 1 National Heritage Area \u00bb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.187, 0.391, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5885", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_a0511245-165d-42d4-984b-d22c988d5742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.085, 0.902, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5886", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_21d23ff9-ab59-4c28-9f7b-4c08ee362138.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.636, 0.881, 0.66] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5887", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_7b1492bb-0d9e-4311-ba8d-402a5ed99076.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Start Date -> CLICK\n[svg] -> CLICK\n[gridcell] Sat May 06 2023 -> CLICK\n[combobox] Start Time -> SELECT: 5:00 PM\n[combobox] End Time -> SELECT: 6:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.353, 0.3, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5888", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_502edf50-b8f9-44ea-8313-42addffed44f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.204, 0.764, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5889", "image": {"bytes": "", "path": "./images/e3016f6f-b0fe-46df-a5ae-d0ad80a6996c_fe721d29-2b19-4c71-8bdf-3be63712c52e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the Dallas Mavericks.\nPrevious actions:\n[link] NBA -> HOVER\n[link] Dallas Mavericks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.079, 0.185, 0.137, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5890", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_416cbb3e-b141-477f-b75a-2e4b3da93394.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[heading] Car specs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.159, 0.331, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5891", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_ed3c1666-d006-4dfa-8ba0-9b84253364e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.249, 0.34, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5892", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_dcad1685-0929-496c-b434-2f408805f4bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.124, 0.868, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5893", "image": {"bytes": "", "path": "./images/763deda0-f51c-4520-b102-5277f702e8bd_49717bd1-bd15-48ca-a3e7-6e3bffe0ed44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the initial release date for Guardians of the Galaxy Vol. 
3 the movie.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.068, 0.047, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5894", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_60e413f8-5da1-49af-9a07-7b8caaa3de3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.137, 0.891, 0.163] in the image\nAnd my action is Action: SELECT\nValue: midnight"}]}, {"id": "mind2web_5895", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_e0a29b99-5021-409d-b09e-cbf39a4b1dd8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[checkbox] 30 April 2023 -> CLICK\n[combobox] Drop off time -> SELECT: 1:00 PM\n[div] Search -> CLICK\n[div] Premium -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.566, 0.331, 0.58] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5896", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_9163c12d-351c-4892-bd5f-8918723bcf44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2012\n[generic] Your Opinion Counts! -> CLICK\n[img] Close -> CLICK\n[combobox] Make -> SELECT: Honda"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.248, 0.508, 0.274] in the image\nAnd my action is Action: SELECT\nValue: Civic"}]}, {"id": "mind2web_5897", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_35301bd1-f6f4-42b1-811c-f35b27afdc8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 30 -> CLICK\n[button] Search -> CLICK\n[button] Get alerts for this flight for flight 906 American... -> CLICK\n[textbox] Email -> TYPE: lin.lon@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.358, 0.828, 0.386] in the image\nAnd my action is Action: TYPE\nValue: lin.lon@gmail.com"}]}, {"id": "mind2web_5898", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_caf11e97-b1ea-4f59-aaf8-02f7a18f9536.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8\n[img] Add -> CLICK\n[combobox] Age of Child 4(Child's age (years)) -> SELECT: 12\n[span] SEARCH -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.413, 0.089, 0.423] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5899", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_e1c980b4-954f-44d5-8288-8b27eb6c7f24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.25, 0.221, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5900", "image": {"bytes": "", "path": "./images/51221157-cb79-407e-95c8-b2c730e95e01_8f46db0b-776d-4841-b186-b3c0faa3dd27.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a music party in Ohio and follow its organizer\nPrevious actions:\n[combobox] autocomplete -> TYPE: ohio\n[div] Ohio -> CLICK\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[div] RANG BARSEY- HOLI MUSIC FESTIVAL, CINCINNATI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.489, 0.223, 0.575, 0.243] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5901", "image": {"bytes": "", "path": "./images/33b57a14-f469-405c-8843-f7e8ceb5b1ed_76da21b1-1ead-496c-841d-d52583fcd675.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 10 of the cheapest green shirt to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.194, 0.03, 0.553, 0.053] in the image\nAnd my action is Action: TYPE\nValue: shirt"}]}, {"id": "mind2web_5902", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_82f59f60-849f-4b79-be21-114105330e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.048, 0.566, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5903", "image": {"bytes": "", "path": "./images/96e95a76-4a1d-491c-82ff-fac663d89ddb_823fc50e-e7ed-41be-8e54-2d9088a4da28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of adventure games under 30 dollars.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Adventure -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.411, 0.225, 0.434] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5904", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c9345036-4aaf-4175-9cd9-1ea6debe5fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK\n[button] EUR -> CLICK\n[div] GBP -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.485, 0.916, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5905", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_47ed0fb1-3ad0-495a-858d-e826a4481c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.122, 0.248, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5906", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_12632bc3-c1f3-4dc7-8320-0923fcbe924b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK\n[link] Black (294) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.719, 0.33, 0.866, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5907", "image": {"bytes": "", "path": "./images/b910229f-6133-452c-a640-6a6ec67b668b_2e5cd3b2-dd5b-4055-bd2b-ffdfc93923ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get quote for Amtrak vacation for two adults across country while leaving from Chicago on the 22nd of April. It should include Deluxe bedroom. 
The passenger is named John Mark with email address of johnmark@gmail.com and phone number of 234567890.\nPrevious actions:\n[checkbox] Deluxe Bedroom -> CLICK\n[button] Continue -> CLICK\n[link] Close -> CLICK\n[textbox] *Preferred date of travel -> CLICK\n[link] 22 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.133, 0.447, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5908", "image": {"bytes": "", "path": "./images/bf008019-bffd-42ad-a48b-054488e1458c_cbd106f6-33c7-4094-9edb-03c35153f4b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking near any NBA arena to enter today after 5 pm and exit after 8 pm\nPrevious actions:\n[link] View All Stadiums -> CLICK\n[link] Book Now -> CLICK\n[tab] Hourly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.345, 0.372, 0.365] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_5909", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_cb88089d-5c77-4c71-b428-9815070ef35d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arive at 9:45 AM.\nPrevious actions:\n[listitem] Staten Island, NY, USA -> CLICK\n[button] Leave now Change Time -> CLICK\n[textbox] Select date (format month/day/year) -> CLICK\n[button] 03/18/2023 -> CLICK\n[link] 25, Saturday March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.24, 0.344, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5910", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_416946e2-39b4-459c-a21e-e3133c02fb04.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors\n[textarea] -> TYPE: To Watch\n[combobox] Type of List * -> SELECT: People"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.555, 0.198, 0.583] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5911", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_4b90e183-4ddd-4768-a0b3-ba25a5dbd94a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: NIAGRA FALLS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.22, 0.369, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5912", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_31ffb3eb-ddb4-4ca0-ba8c-1a6dd6b4497b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK\n[link] View More -> CLICK\n[checkbox] RPG RPG -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.025, 0.378, 0.043] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5913", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_b35e7803-4bfd-4c47-94eb-9055e61c98fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.212, 0.868, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5914", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_a8676a33-d16d-4331-b300-a79c7d73f3ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.037, 0.817, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5915", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_8a4a6571-1410-440e-a5f1-1ed1c39160a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.348, 0.28, 0.385] in the image\nAnd my action is Action: SELECT\nValue: AUSTRALIA"}]}, {"id": "mind2web_5916", "image": {"bytes": "", "path": "./images/e7e1616e-dd5f-4eb4-a7f1-b757c7880877_fbfa94eb-b0f2-40b4-a0ec-c95ea564d036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up the scores for the previous day's NBA games\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.026, 0.178, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5917", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5746bc15-9d5b-484d-8f38-d15bdcbed1ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[gridcell] Sat May 06 2023 -> CLICK\n[button] Update Search -> CLICK\n[button] Filter -> CLICK\n[checkbox] Self Park (1) -> CLICK\n[button] Show 1 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.548, 0.372, 0.563] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5918", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_9c71b00d-199e-437e-a510-ab151f6b1539.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.004, 0.561, 0.016] in the image\nAnd my action is Action: TYPE\nValue: Taylor Swift"}]}, {"id": "mind2web_5919", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_905e2ca6-5659-4d6c-be5d-940a41712c87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK\n[button] View details -> CLICK\n[link] Select package -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.787, 0.327, 0.965, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5920", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_c6a46943-7ccf-4d6e-a06b-13264890131f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK\n[button] Today -> CLICK\n[button] April 20, 2023. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.25, 0.562, 0.259] in the image\nAnd my action is Action: SELECT\nValue: 7 Guests"}]}, {"id": "mind2web_5921", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_ac56a867-e610-41b4-a583-605eb29cd9c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.078, 0.32, 0.089] in the image\nAnd my action is Action: TYPE\nValue: NIAGRA FALLS"}]}, {"id": "mind2web_5922", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_21105c34-cac6-408f-b1ed-2ee9550a4dcd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.2, 0.134, 0.209, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5923", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_79eb3b5b-854a-44b5-a115-c239a4d58c3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\n[link] \ue92f Computer Systems \uf105 -> HOVER\n[link] Gaming Desktops -> CLICK\n[span] NVIDIA GeForce RTX 4000 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.2, 0.192, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5924", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eea26e1d-39b7-4781-b30d-dbdf56df77fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK\n[checkbox] Chevrolet \ue066 -> CLICK\n[checkbox] Ford \ue066 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.509, 0.408, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5925", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_5d332384-8419-4484-8e27-3a97401f38f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? 
-> TYPE: Lansing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.226, 0.52, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5926", "image": {"bytes": "", "path": "./images/1a2befd0-5e90-4bed-81b5-8ee0a93873df_e85f24b8-77cd-4c47-b407-05b6a636c04c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me RPG video games.\nPrevious actions:\n[button] Video Games chevron_right -> CLICK\n[button] Shop all -> CLICK\n[link] Filter -> CLICK\n[button] Genre -> CLICK\n[link] View More -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.679, 0.063, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5927", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_121024fb-90f8-4d41-be93-5f26d9dabfc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.472, 0.212, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5928", "image": {"bytes": "", "path": "./images/9e44c63b-2c7a-445c-b9c4-6580f4271268_aab2a04e-c41e-4057-abc6-d839f51cfcc6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track the status of a rebate for Monty Lue, house number 4847, zip code 10019\nPrevious actions:\n[link] Rebate Center -> CLICK\n[link] Track your rebates -> CLICK\n[textbox] * Required Fields First Initial * Required Fields... -> TYPE: Monty"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.024, 0.32, 0.476, 0.348] in the image\nAnd my action is Action: TYPE\nValue: Lue"}]}, {"id": "mind2web_5929", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_f785e1b7-a569-4764-9ed9-af5a405d8962.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK\n[button] Show filter modal Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.628, 0.311, 0.81, 0.326] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5930", "image": {"bytes": "", "path": "./images/e9f5f0d0-7241-4a28-b387-c65a0ce52a61_82cc36d2-5e6b-4fff-b30e-4cea1a55c919.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a rental car in Miami, FL for pick up at the airport and drop off at a different location within the same city.\nPrevious actions:\n[tab] Different drop-off -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Miami Airport\n[span] Miami -> CLICK\n[textbox] Drop-off location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.413, 0.175, 0.554, 0.218] in the image\nAnd my action is Action: TYPE\nValue: Miami"}]}, {"id": "mind2web_5931", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b4b72b7a-2b9a-4bc6-9d43-34f2094f2bb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.247, 0.336, 0.297] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_5932", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_dad6690b-9b3e-4395-bd06-9aa065bf4027.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK\n[combobox] Enter pick up city, airport name, or airport code. 
-> TYPE: Brooklyn Central"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.252, 0.329, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5933", "image": {"bytes": "", "path": "./images/f52ba76c-e5fc-48e5-85f3-41d4234a5c5f_1bc4d405-9f40-47ef-80b3-eaf62e4f49a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used Xbox series X available.\nPrevious actions:\n[button] Main Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.142, 0.291, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5934", "image": {"bytes": "", "path": "./images/851998b2-fda2-4bd4-a822-f1871a9fde12_7a51a447-19e0-40e9-9568-7af78ea6557b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of action figure items available for pre order.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.118, 0.084, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5935", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_09c7f72f-1512-4342-b3c3-ae639ae8cdfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[span] Washington County Regional Apo -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] Next -> CLICK\n[link] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.233, 0.666, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5936", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_9b6f677f-1342-423b-bb5f-68412e75b9ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.371, 0.796, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5937", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_f7d7601c-aa74-4cfc-a9c4-2f08fe628d44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\n[button] MORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.18, 0.165, 0.467, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5938", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_ec42f184-f160-4f42-94d4-6789b2afea9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.04, 0.931, 0.063] in the image\nAnd my action is Action: TYPE\nValue: Real Madrid"}]}, {"id": "mind2web_5939", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_8be9df77-6ac7-4962-bc6b-b8084033cc94.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.26, 0.491, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5940", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_a0acf9c4-877d-41bf-b856-126033533bdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\n[link] Tabs -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.825, 0.231, 0.97, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5941", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_97b3ffde-528a-43c7-8306-22f3294f8b0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.039, 0.931, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5942", "image": {"bytes": "", "path": "./images/1a35becb-e07b-4235-8cd3-fc2a29f8dd6f_31d78d33-b4df-433c-8033-62c738f1a8a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the most most popular of all time tab to a playlist\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Tab -> CLICK\n[svg] -> CLICK\n[link] Most popular of all time -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.38, 0.255, 0.485, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5943", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_2a72834c-741f-4059-8cb2-0a6769c33a32.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[button] Reservations -> HOVER\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: AUCKLAND"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.302, 0.234, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5944", "image": {"bytes": "", "path": "./images/c254dcec-bd9b-4cf0-9225-9d2bf1345f38_daaa0ead-b5e3-4a9e-91aa-d9cadc1b97dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check Real Madrid player Vinicius Junior bio\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.895, 0.109, 0.942, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5945", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_bc8c7895-1ab4-407c-85e6-11dfd925cfa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.34, 0.411, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5946", "image": {"bytes": "", "path": "./images/6760de22-dc0e-4b29-916f-a6a78f2c0551_b65f2754-439c-42c9-a484-846c10998517.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking prices for upcoming Los Angeles Lakers games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.012, 0.156, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5947", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_a6eff9d8-88ca-429b-9bdf-a7955bd4eb06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.169, 0.293, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5948", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_728205e5-9af6-447c-8866-339071d7f193.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[radio] New List -> CLICK\n[textbox] Title -> TYPE: New\n[button] Save -> CLICK\n[button] Explore -> CLICK\n[link] Electronic -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.154, 0.362, 0.271, 0.444] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5949", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_edb09eb7-6a8c-4aeb-9b52-796762ca821d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.653, 0.844, 0.721] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5950", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_0cb8de0b-1d19-4944-9449-4e01d24cb987.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.289, 0.111, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5951", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_5b60b5ed-44e7-434e-8908-e11418f9e4dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[img] Increase children -> CLICK\n[img] Increase infants -> CLICK\n[combobox] Child 1 age -> SELECT: 4\n[combobox] Infant 1 age -> SELECT: Under 1\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.868, 0.103, 0.931, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5952", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_c78075a6-7141-410a-82c2-a1fdd561cf38.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[textbox] Destination or property -> TYPE: Athens, Attica, Greeece\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.344, 0.095, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5953", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_4581fcd6-7468-4230-b488-bcaac1055d22.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5954", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_999f432e-b9a3-4a3f-87fc-7f3e4c568500.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[checkbox] Offers Takeout -> CLICK\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.321, 0.658, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5955", "image": {"bytes": "", "path": "./images/3b6385ea-0cdc-4ed8-be93-dc90113d2255_de4459d6-26bf-4a32-9099-9880aab98615.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Apple Stores close to zip code 90028\nPrevious actions:\n[link] Store -> HOVER\n[link] Find a Store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.331, 0.042, 0.669, 0.073] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_5956", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_f485178c-d6c3-4937-b013-bafc9d8fe989.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[link] EXPLORE MULTI-RIDE PASSES -> CLICK\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.362, 0.826, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5957", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_6b113310-2e4d-4c97-b6e3-51d42e406e3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Material -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.172, 0.237, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5958", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_fbe6ff6d-5197-4a28-8a47-777faa60d37b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM\n[span] Montgom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.589, 0.271, 0.635] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5959", "image": {"bytes": "", "path": "./images/2cdee3d3-54d9-4f55-a8de-79ba9e2da28c_8734306f-5a8a-4671-a560-5850fbb319a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find theaters for Creed III within 20 miles of zip code 90028.\nPrevious actions:\n[img] movie poster for Creed III -> CLICK\n[link] get tickets for Creed III -> CLICK\n[select] AMC Columbus 10 -> SELECT: Change Location..."}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.092, 0.711, 0.121] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_5960", "image": {"bytes": "", "path": "./images/e62d1f6c-d98c-4ea2-b699-527eeedf3e9b_b9e28e22-0524-4e6b-a3f2-13059124e719.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order an adult\u2019s bottomless bubbles package for Joe Bloggs who is booked onto cruise 101010\nPrevious actions:\n[link] Beverage Packages -> CLICK\n[button] add Bottomless Bubbles to the cart with price $9.5... -> CLICK\n[div] I want to give a gift to a booked cruiser -> CLICK\n[textbox] first-name -> TYPE: Joe\n[textbox] last-name -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.3, 0.434, 0.322] in the image\nAnd my action is Action: TYPE\nValue: 101010"}]}, {"id": "mind2web_5961", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_fdca6b87-1298-4b66-bbfa-d325894b0f2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.028, 0.821, 0.047] in the image\nAnd my action is Action: TYPE\nValue: winter coat"}]}, {"id": "mind2web_5962", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_471f0a54-bca2-48ac-91d6-7b20917a0ec8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[span] 48 mo -> CLICK\n[span] Get pre-qualified -> CLICK\n[button] Get started -> CLICK\n[textbox] First name -> TYPE: john\n[textbox] Last name -> TYPE: doew"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.302, 0.419, 0.495, 0.459] in the image\nAnd my action is Action: TYPE\nValue: johndoew@gmail.com"}]}, {"id": "mind2web_5963", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_1a324d9d-69b2-4367-9576-c0f051d94050.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[div] BMW -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.256, 0.255, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5964", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_b3e14891-f17f-4a02-9c65-53333af0daf6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.013, 0.601, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5965", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_eaf07f0c-2487-42fc-9bae-98d8979b6192.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.402, 0.844, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5966", "image": {"bytes": "", "path": "./images/254a67f6-3e37-4f12-838d-2d9fb0cedbf0_c99edf07-d6a6-46ea-a1da-f1cdbea62441.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the stats of the Athletic Club from spanish Laliga\nPrevious actions:\n[li] Soccer -> CLICK\n[link] Teams \ue00d -> CLICK\n[select] UEFA Champions League -> SELECT: Spanish LaLiga"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.111, 0.337, 0.131, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5967", "image": {"bytes": "", "path": "./images/62c5067e-daff-4af6-bd79-83f6a969f4d6_7d5bb406-d247-416d-8e13-dd2cf463b43e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get details of the fastest trip between 52nd Street,Brooklyn and 74th Street, Brooklyn, leaving now, that is accessible for a disabled person.\nPrevious actions:\n[searchbox] From -> TYPE: 52nd street, brooklyn\n[listitem] 52nd Street, Brooklyn, NY, USA -> CLICK\n[searchbox] To -> TYPE: 74th street, brooklyn\n[listitem] 74th Street, Brooklyn, NY, USA -> CLICK\n[label] Accessible Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.389, 0.359, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5968", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_0e2c3790-5fc1-451f-bc5a-f9e29750564c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.022, 0.309, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5969", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_076445f7-fdd3-49f9-a7d9-642f2d7090a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[link] Fresh Fruits -> CLICK\n[span] Add -> CLICK\n[span] Add -> CLICK\n[path] -> CLICK\n[link] Fresh Vegetables -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.903, 0.208, 0.964, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5970", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_47176cdb-d2c8-4197-8b6c-cb83c22fe1ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK\n[input] -> TYPE: 3329456534543"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.095, 0.707, 0.129] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_5971", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_68c92172-9b4e-41fd-866d-129ce1846de4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.183, 0.592, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5972", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_0e448126-506e-4091-91af-91117f73e5d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... 
-> TYPE: 10018\n[button] Find care -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.385, 0.43, 0.448, 0.464] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5973", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_55207068-c425-4a70-ad93-db28b62041e3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.739, 0.83, 0.786] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5974", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_9d457ae2-7f3a-454c-9bcd-38738fdc80e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Pre-owned -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK\n[button] Price -> CLICK\n[link] Under $75.00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.214, 0.412, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5975", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_266eb157-7298-4781-b591-f73f82a00451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.145, 0.579, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5976", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_17db9ef9-89fe-482a-bfe0-9e2bf9d76253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[textbox] Shipping Address -> ENTER\n[spinbutton] Home Square Footage -> TYPE: 200\n[combobox] Number of Stories -> SELECT: Two-Story\n[button] Next -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.403, 0.959, 0.436] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5977", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_2161ad6c-0a74-439a-ad07-2493fe8039c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.701, 0.867, 0.899] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5978", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_625191e5-adcb-4948-a105-2c4e95dad39f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.022, 0.424, 0.051] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5979", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_3b88c290-cc6d-40d9-8de4-2f891e6650c7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.072, 0.411, 0.087] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5980", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_8815465b-cee1-4d62-bd97-da432f3cf972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[link] What to Watch on Amazon Prime -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.3, 0.638, 0.317] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5981", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_cb08d3e8-86b1-44c5-9bee-0261182c7acd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: the weeknd\n[button] Search -> CLICK\n[a] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.888, 0.398, 0.957, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5982", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_971e520c-3d24-499b-9111-fd67d7d1a884.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.228, 0.336, 0.246] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_5983", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_71a88ba7-ab8d-470d-bcab-c04236870135.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.009, 0.651, 0.039] in the image\nAnd my action is Action: TYPE\nValue: mens black running shoes"}]}, {"id": "mind2web_5984", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_d248d946-05c5-485a-bb16-dd322317f149.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.272, 0.677, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5985", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_b955660b-1f8d-4a21-b953-dac02bb5c70c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK\n[textbox] Search Amazon -> TYPE: laundry detergent\n[button] Go -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.086, 0.917, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5986", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_fb963821-351c-4418-8bbc-a5f87f916ed8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: Chicago\n[menuitem] Chicago United States of America ORD OHare Interna... -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: Paris\n[menuitem] Paris France CDG Charles De Gaulle Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.266, 0.634, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5987", "image": {"bytes": "", "path": "./images/a4397261-95a5-4d57-a531-6082b2af8ac3_fe15d851-e3ca-40c2-bc4a-afb820d1d12c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black sleeping pad that is under $40.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.055, 0.798, 0.075] in the image\nAnd my action is Action: TYPE\nValue: black sleeping bag"}]}, {"id": "mind2web_5988", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_648d07dc-b2c7-47e8-b6b9-2c603d07455d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK\n[textbox] Pickup -> CLICK\n[button] Saturday, April 8, 2023 -> CLICK\n[button] Thursday, April 13, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.625, 0.266, 0.658] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5989", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_eada1fe6-09c0-45de-a024-e035bf9aa036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[span] -> CLICK\n[button] Sort by:Our top picks -> CLICK\n[div] Price (lowest first) -> CLICK\n[link] See availability -> CLICK\n[button] Reserve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.765, 0.666, 0.923, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5990", "image": {"bytes": "", "path": "./images/370a037c-c397-4adb-ab7c-0c388f448f68_5e4409af-2205-49f1-a595-13b9617f85a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find vinyl records at the lowest price.\nPrevious actions:\n[button] Marketplace -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.054, 0.183, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5991", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_47aabd28-f643-4dbd-96ca-fcd0b7cbaae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.24, 0.5, 0.276] in the image\nAnd my action is Action: TYPE\nValue: Harrt Reid Intl Airport, LAS"}]}, {"id": "mind2web_5992", "image": {"bytes": "", "path": "./images/33064851-00d9-46c8-b0b7-6b5048005c51_ac2a90b7-0655-4a0e-afc6-64e9c8c133ba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a registry in the name of Sheldon Cooper in Arizona and sort by latest to soonest.\nPrevious actions:\n[link] Registry -> CLICK\n[button] Find a registry -> CLICK\n[textbox] First name -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.197, 0.37, 0.222] in the image\nAnd my action is Action: TYPE\nValue: SHELDON"}]}, {"id": "mind2web_5993", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_e22f283f-8da1-4294-ac2c-90e0d472d487.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK\n[combobox] Language -> SELECT: \ud83c\uddec\ud83c\udde7 English (UK)\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.438, 0.206, 0.639, 0.237] in the image\nAnd my action is Action: SELECT\nValue: Euro (EUR/\u20ac)"}]}, {"id": "mind2web_5994", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_21a13bd7-0205-4eb5-bf16-53f77d303977.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[link] Sports & Fitness -> CLICK\n[svg] -> CLICK\n[combobox] autocomplete -> TYPE: San Francisco\n[div] CA, USA -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.224, 0.273, 0.248] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5995", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_6c06c719-5c8f-4536-bf8c-e31d5d14af89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.304, 0.215, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5996", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_7f372b63-9007-46eb-9765-517c2ca4b480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] See All -> CLICK\n[link] Computers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.241, 0.285, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5997", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_d7cbd9b8-6505-43f7-a163-d7c00bfd62a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK\n[button] Calendar -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.211, 0.514, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5998", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_8f546563-ff13-45a3-8764-50c7781f81b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? 
-> TYPE: India\n[button] India Asia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.33, 0.777, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_5999", "image": {"bytes": "", "path": "./images/4786982f-25f5-4bf8-bb91-522656489f63_424f88c5-1dc6-439d-8cba-43b2225ac064.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve the cheapest hotel with 5 stars and free wifi located in downtown dc in Washington on June 5 for 4 adults in 2 rooms.\nPrevious actions:\n[searchbox] Please type your destination -> TYPE: washington\n[option] Washington District of Columbia,\u00a0United States -> CLICK\n[span] 25 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.26, 0.463, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6000", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_9a9fafc5-8b22-4d00-a724-188153f1c7b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.024, 0.987, 0.046] in the image\nAnd my action is Action: TYPE\nValue: Matthews winery"}]}, {"id": "mind2web_6001", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_30aa0a88-4767-45c6-8fa8-eb179e6e0cb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU\n[div] Tribhuvan Intl Airport (KTM), Nepal -> CLICK\n[textbox] Where to? -> TYPE: SHANGHAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.363, 0.661, 0.394] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6002", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_4f65d11e-ef5e-43c6-8f29-3bb466f8c02a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[svg] -> CLICK\n[span] Where to? -> TYPE: TOKYO\n[span] -> CLICK\n[div] Multi-city -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.669, 0.221, 0.906, 0.26] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6003", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_1c73847e-41c9-4e2b-ab4d-f9a8c2156508.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.529, 0.309, 0.561] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6004", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_a8388b0d-2e41-4cf0-ae0b-be1a72f3df55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK\n[Date] FROM -> CLICK\n[Date] FROM -> TYPE: 04/01/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.224, 0.296, 0.287, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6005", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_5da862ec-0736-4eef-82ff-2920815a68b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK\n[label] 100 - 200 USD (6) -> CLICK\n[button] APPLY -> CLICK\n[combobox] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.15, 0.312, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6006", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_d1b27abe-d3b2-458b-8b80-428b838fc9eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK\n[option] Price: Low to High -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.008, 0.549, 0.036] in the image\nAnd my action is Action: TYPE\nValue: laundry detergent"}]}, {"id": "mind2web_6007", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_7d9695f9-c5ef-4fb4-908d-cbd0c1b4d423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.142, 0.294, 0.164] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6008", "image": {"bytes": "", "path": "./images/95619447-fe28-4cc0-9e0a-d888e5f73d7e_27347e65-851e-4170-a7bf-64293faf81e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the toddler collection and add one pair of the cheapest socks for a 6 months to 5 years to the wishlist.\nPrevious actions:\n[option] Price: Low to high -> CLICK\n[div] See more chips. -> CLICK\n[img] Short Socks (2 Pairs) -> CLICK\n[svg] -> CLICK\n[button] Age18M-3Y(12-15cm) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.536, 0.642, 0.579] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6009", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_7ed1aec7-f9b5-428c-ab0a-0340f1a44480.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.027, 0.464, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6010", "image": {"bytes": "", "path": "./images/61fa0552-66ac-4572-95f2-800537cfee7b_5b09a6bf-3ece-4b80-961a-6928f0367453.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check restaurant with delivery service available 6pm on Mar 25th in Detroit, MI.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: detroit\n[span] MI, USA -> CLICK\n[svg] -> CLICK\n[button] 25 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.316, 0.855, 0.357] in the image\nAnd my action is Action: SELECT\nValue: 6 00 PM"}]}, {"id": "mind2web_6011", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_4b178183-f3c3-495d-b232-53c6250c7329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[link] Gift Cards -> CLICK\n[div] Send a physical card through the mail. 
-> CLICK\n[textbox] * Amount: -> TYPE: 50\n[button] VIEW ALL -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.234, 0.783, 0.249] in the image\nAnd my action is Action: TYPE\nValue: John"}]}, {"id": "mind2web_6012", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_0c112ad5-8f20-4d35-ab34-fab5d32abbe0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[span] Paris Beauvais -> CLICK\n[generic] 26 -> CLICK\n[generic] 2 -> CLICK\n[div] -> CLICK\n[button] Apply promo code -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.063, 0.369, 0.082] in the image\nAnd my action is Action: TYPE\nValue: 1000001"}]}, {"id": "mind2web_6013", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_d0076f22-6fca-4791-b04d-2567fd6b3d69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: diner\n[heading] Spiral Diner & Bakery - Fort Worth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.135, 0.192, 0.158] in the image\nAnd my action is Action: SELECT\nValue: 3 Guests"}]}, {"id": "mind2web_6014", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_390bf505-f485-4703-86b5-6894eda4e191.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> TYPE: high tide\n[heading] High Tide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.26, 0.373, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6015", "image": {"bytes": "", "path": "./images/66625c9d-5bf3-42d1-b463-ab2767307201_aba5ccb1-8c17-4ae3-b311-38bbf81bd19f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Learn more about the Partner Deal that gives 25% off for Veterans.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.538, 0.026, 0.598, 0.039] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6016", "image": {"bytes": "", "path": "./images/76294719-e146-4f92-986f-42b86a9808c7_83e6f82f-518e-46a3-83e7-9512d36279d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Los Angeles, California, and set Riverside as my store and add a card from that store to my favorites\nPrevious actions:\n[link] Find a store -> CLICK\n[p] 31 stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.207, 0.796, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6017", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_305706d0-b1f9-42fc-988c-a57904eb9ce7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[img] -> CLICK\n[svg] -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.72, 0.359, 0.988, 0.398] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6018", "image": {"bytes": "", "path": "./images/8865ca64-0df2-4e3f-905d-a9e07e1eeb68_93f20444-cbc4-4f91-ae8d-26e72b80b236.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show bike service charges for store at Lafayette St, 10013.\nPrevious actions:\n[button] Cycle -> CLICK\n[link] Shop Services -> CLICK\n[link] Find a bike shop near you -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.569, 0.372, 0.59] in the image\nAnd my action is Action: TYPE\nValue: 10013"}]}, {"id": "mind2web_6019", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_9b2f17c5-ddb8-49a4-87b3-1840f8f1047e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[textbox] Enter ZIP or State -> CLICK\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.544, 0.234, 0.578] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6020", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_714aee7c-12e9-46f2-80e7-71ba558c3f4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK\n[link] Africa 29 -> CLICK\n[div] Durban -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.512, 0.211, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6021", "image": {"bytes": "", "path": "./images/0790c8be-6e6d-4e2a-9c13-43ac9367da4c_0c664ad1-d63c-45e2-bb2e-95f9b295e8f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the most recent full time Accounting & Finance Job available in Richmond, Virginia.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.007, 0.384, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6022", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_10fb0059-93be-4e14-875a-92fd1557bfd5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK\n[span] Pick a date... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.18, 0.518, 0.209, 0.543] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6023", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_af4330bb-a695-48ff-bddb-dddf6ee09277.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.226, 0.07, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6024", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_5799d00b-7193-4441-9a23-5d2fd1c7d4f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.0, 0.169, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6025", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_61a66563-d15b-4bd5-a0e1-cca261a596de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[textbox] Enter license plate -> TYPE: YAW639\n[combobox] state -> SELECT: LA\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 70726\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.343, 0.31, 0.383] in the image\nAnd my action is Action: TYPE\nValue: 222900"}]}, {"id": "mind2web_6026", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_36e6f5fb-eb43-4278-aee1-29a470c244a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.042, 0.343, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6027", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_e420dd73-9c53-48e7-b5be-51c7c081f040.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK\n[button] - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.603, 0.166, 0.659] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6028", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_0551c27e-cc99-459d-b713-9a698a9eb578.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... -> CLICK\n[button] Check More Stores -> CLICK\n[textbox] Enter zip code or location. Please enter a valid l... 
-> TYPE: 90028\n[img] -> CLICK\n[generic] 6201 Hollywood Blvd., Suite 126 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.202, 0.87, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6029", "image": {"bytes": "", "path": "./images/b040b35d-cfd3-41b8-8b8b-851ab151a9cc_8711c91a-4523-49b7-aab6-78c85d0e8af7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the complexity rating for Frosthaven.\nPrevious actions:\n[combobox] Search -> TYPE: frosthaven"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.048, 0.986, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6030", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_ad95a52e-a737-472f-89dd-9b9c096d10c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Sort by -> CLICK\n[div] Lowest price -> CLICK\n[div] All dates -> CLICK\n[span] 2 -> CLICK\n[span] 2 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.172, 0.781, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6031", "image": {"bytes": "", "path": "./images/91843d71-05c3-4b17-9b8c-856f2390fe02_cf567012-e9c2-4c4d-a269-6abf5adff7d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the year that Tom Brady had the most touchdowns in a single seasson.\nPrevious actions:\n[link] Players -> CLICK\n[link] Tom Brady -> CLICK\n[link] STATS -> CLICK\n[link] CAREER -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.282, 0.655, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6032", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d1e11c67-483f-4ef7-aac4-3740e9498349.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] View All Airports -> CLICK\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.273, 0.595, 0.289] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6033", "image": {"bytes": "", "path": "./images/102c50a4-23f8-44ae-8300-43822b271dbf_49fbd1f1-44e1-46b1-807a-88fa536868b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 places from OMG! 
below $500 to Wishlist and name it \"Togo\".\nPrevious actions:\n[link] Show 684 stays -> CLICK\n[path] -> CLICK\n[textbox] Name -> TYPE: Togo\n[button] Create -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.317, 0.709, 0.363] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6034", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_353ff760-01eb-4a28-8694-2e0dfbf72cb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.1, 0.478, 0.125] in the image\nAnd my action is Action: TYPE\nValue: Le maraise"}]}, {"id": "mind2web_6035", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_49b0a764-2d11-408e-81a9-a1f9983a7ac5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.046, 0.652, 0.081] in the image\nAnd my action is Action: TYPE\nValue: Nintendo Switch Console"}]}, {"id": "mind2web_6036", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_05442d32-f8bd-4cac-8990-cc1c6885ba52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[button] make it my store -> CLICK\n[path] -> CLICK\n[span] Easter -> CLICK\n[span] Easter Eggs -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.188, 0.816, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6037", "image": {"bytes": "", "path": "./images/2d4b44c7-d11a-41c1-8be7-af4ae80b7a8c_e169a421-70f0-477a-9db4-ed882245eb5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store near 11231 zip code.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: 11231\n[button] Submit -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.863, 0.229, 0.981, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6038", "image": {"bytes": "", "path": "./images/3c9442f9-5542-4395-918a-6551dbba3e3a_b060e216-e69b-4bad-81a0-482a2cfd7a18.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show Massively Multiplayer games which can be played on VR.\nPrevious actions:\n[link] Massively Multiplayer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.116, 0.497, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6039", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_888d6f02-95b8-4b33-8eb6-25baeaba2feb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK\n[link] DEALS & PROMOTIONS Amtrak travel deals, promotions... -> CLICK\n[label] All Regions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.558, 0.269, 0.573] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6040", "image": {"bytes": "", "path": "./images/4e9d71b9-b936-41d0-b1f9-cbfc17f79f51_ba10e068-4c5c-44f8-8b25-50986ef28501.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the baggage fee for first class flight leaving Columbus CMH to New Orleans MSY on april 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[combobox] From -> TYPE: columbus\n[button] Columbus, OH, US (CMH) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.211, 0.562, 0.228] in the image\nAnd my action is Action: TYPE\nValue: new orleans"}]}, {"id": "mind2web_6041", "image": {"bytes": "", "path": "./images/3bb1c925-71ef-4713-a139-a3be65f8a7a5_416ca3da-f479-4b9d-b5f5-29b8c251f0f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews for Fallout 4 and mark the first one as helpful.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.055, 0.93, 0.065] in the image\nAnd my action is Action: TYPE\nValue: Fallout 4"}]}, {"id": "mind2web_6042", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_6cd768bb-689b-45f4-aa5e-d0e6532efd84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.215, 0.246, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6043", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_05e33d5a-8cac-4627-a403-d66707fd9217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Kindle E-readers & Books -> CLICK\n[link] Kindle Books -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.008, 0.651, 0.034] in the image\nAnd my action is Action: TYPE\nValue: roman empire history"}]}, {"id": "mind2web_6044", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_dbc01bfc-47d2-48f9-b43a-8b8e74b33d08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.752, 0.541, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6045", "image": {"bytes": "", "path": "./images/56cac423-4be9-4f74-9031-7cef1fe60ef9_0f4a4727-3b2d-4295-b8fd-52f2e3c17124.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fuel economy for a 2005 Toyota Corolla\nPrevious actions:\n[link] Price New/Used -> CLICK\n[combobox] Year -> SELECT: 2005\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.625, 0.203, 0.71, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6046", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_405e3a16-3fd4-405a-8e06-74ca8e5fe25b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.175, 0.156, 0.187] in the image\nAnd my action is Action: TYPE\nValue: The Last of Us"}]}, {"id": "mind2web_6047", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_e11321fa-806c-4878-b2b8-656dd9b9c735.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK\n[searchbox] To -> TYPE: Oyster Bay\n[listitem] Oyster Bay, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.201, 0.359, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6048", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_55dbe23a-9887-4aca-9658-46b687dac5af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.302, 0.777, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6049", "image": {"bytes": "", "path": "./images/549452ab-637a-4997-bce1-5898541bb288_3a060beb-0619-4c77-8131-7ffe4c62debf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all NFL tickets\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.026, 0.28, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6050", "image": {"bytes": "", "path": "./images/562d8516-e1fa-464b-9bb1-542fe818c721_a8fc3743-1bc9-4364-8cf4-243301d9ad7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest copy of King of Tokyo on GeekMarket that ships to Canada.\nPrevious actions:\n[button] Shopping -> CLICK\n[link] GeekMarket -> CLICK\n[combobox] Search Board Games... 
-> TYPE: king of tokyo\n[link] King of Tokyo -> CLICK\n[button] Sort: Recently Added -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.264, 0.136, 0.383, 0.153] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6051", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_5081e4ab-c126-42d1-a018-1794aa0466d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[div] Size -> CLICK\n[link] S -> CLICK\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.572, 0.233, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6052", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_7f7174c9-88dc-4df0-8fac-54a0603bbbac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... -> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: heathrow\n[button] London (LHR - Heathrow) United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.129, 0.568, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6053", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_16b24021-bfdb-41dd-a733-ca9415863d65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.072, 0.594, 0.104] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6054", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_7473d4f5-d147-4c64-912c-620553698746.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[combobox] Enter pick up city, airport name, or airport code. -> TYPE: Brooklyn Central\n[div] Brooklyn - Central (New York), US -> CLICK\n[textbox] Pickup -> CLICK\n[button] Sunday, April 9, 2023 -> CLICK\n[button] Saturday, April 15, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.29, 0.376, 0.484, 0.403] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6055", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_5d6ff011-4cfd-4d8b-abdc-39e927e234bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.501, 0.064, 0.556, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6056", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4da58b5c-bb8e-4c17-be85-757cbff832c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.093, 0.646, 0.126] in the image\nAnd my action is Action: TYPE\nValue: resident evil"}]}, {"id": "mind2web_6057", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_fe2547fa-bebe-490d-95b9-22a6f8cd70f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.083, 0.67, 0.103] in the image\nAnd my action is Action: TYPE\nValue: Smithsonian"}]}, {"id": "mind2web_6058", "image": {"bytes": "", "path": "./images/9b6316ee-4cfb-490e-bf52-9ee6cdded08b_110a1d40-5b06-4c86-821d-a085f20b70f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find ideas and recommendation for things to do in Cancun.\nPrevious actions:\n[textbox] Where to? -> TYPE: cancun\n[circle] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.073, 0.498, 0.093] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6059", "image": {"bytes": "", "path": "./images/0c02c193-2aef-4817-92b4-56722edc6b57_0df26719-4457-4f0d-a480-07531eaae3b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: I want to see the best seller gender neutral skirts\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: skirt\n[textbox] Search by keyword or web id -> ENTER\n[span] Gender -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.133, 0.086, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6060", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_b37e82ef-4b52-4746-8b5e-68663a04a73d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[button] Transmission -> CLICK\n[span] Automatic -> CLICK\n[button] Back to all categories -> CLICK\n[heading] Distance & Shipping -> CLICK\n[button] $99 Or Less -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.413, 0.237, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6061", "image": {"bytes": "", "path": "./images/7685e8ad-3989-4316-85dd-746fac2956be_c1977bd0-8644-4263-937b-c5b4d681d54b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a recently posted part-time Job in Gamestop stores in Fresno, California, and apply.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View Jobs Stores -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.439, 0.307, 0.459] in the image\nAnd my action is Action: TYPE\nValue: fre"}]}, {"id": "mind2web_6062", "image": {"bytes": "", "path": "./images/05238c0f-514f-4af2-bc4c-f7521d649825_86b75914-108e-4670-923c-28f40115d397.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Dota 2 game and add all DLC to cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.083, 0.93, 0.098] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6063", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_6e9ddb40-c8c9-49c8-b24d-23de6338158b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.128, 0.603, 0.15] in the image\nAnd my action is Action: TYPE\nValue: india"}]}, {"id": "mind2web_6064", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_1cb32c35-d655-487b-ad30-fc234522bfe5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK\n[button] Los Angeles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.171, 0.028, 0.441, 0.062] in the image\nAnd my action is Action: TYPE\nValue: diner"}]}, {"id": "mind2web_6065", "image": {"bytes": "", "path": "./images/2b29b7d1-bd80-4d2c-a491-9dfc89827e84_2c1ac090-674a-426e-9cab-3857abef2dfe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse best selling black hoodies in mens size Big and Tall that is between $25 and $50.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens black hoodie\n[span] mens black hoodie -> CLICK\n[div] Size Range -> CLICK\n[link] Big & Tall (5,552) -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.235, 0.123, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6066", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_062beced-fb6a-435e-9e47-a52f8ff8db4f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.122, 1.0, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6067", "image": {"bytes": "", "path": "./images/ebae9339-93bf-4f5a-9396-ce30bcec7b49_caf5665b-735a-4dbc-b204-6b82136c31db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the price of Boys' INFANT UA SURGE 3 RUNNING SHOES. 
black color and 9K size\nPrevious actions:\n[menuitem] Kids -> CLICK\n[div] Product Category -> CLICK\n[link] Shoes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.517, 0.233, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6068", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_72517ff4-d1db-49a4-a416-9539c0b06e84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.08, 0.412, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6069", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_cc132005-2d40-4e1f-8699-bf828e06b700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.438, 0.032, 0.446] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6070", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_31a14711-3dfc-40e7-82e0-7c877e622c01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.084, 0.327, 0.111] in the image\nAnd my action is Action: TYPE\nValue: edinburg"}]}, {"id": "mind2web_6071", "image": {"bytes": "", "path": "./images/95936f53-1e60-4bad-9cd2-65831e309768_e7584865-130e-41bc-8b05-9c8a0376a1e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse restaurants, open 24 hours, and sorted by rating.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.257, 0.348, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6072", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_e72a4cee-1a25-4609-a4a9-09587f670585.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[menuitem] First class -> CLICK\n[button] Departing April 5, 2023 -> CLICK\n[button] May 27, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Leaving from -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.128, 0.362, 0.161] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_6073", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_7c668a7b-1de9-4df9-b75b-69ac45fc6d15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[combobox] \uf0d7 -> SELECT: 1 Child\n[select] Age -> SELECT: 0\n[link] Search Hotels -> CLICK\n[radio] $100 to $200 -> CLICK\n[radio] New York (and vicinity) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.523, 0.123, 0.529] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6074", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_9fb8c08d-560d-454e-8098-08de434ef903.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Harry Potter\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.335, 0.341, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6075", "image": {"bytes": "", "path": "./images/1fefdb27-882b-481b-97e2-720f4d2338a3_2008bd87-e75d-4056-ab8d-218ec362bbb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for activities in Phuket, Thailand.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Phuket"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.218, 0.795, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6076", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_703f0030-4a9b-4879-a9ec-f17fff4b2859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Deals -> CLICK\n[heading] Spring Break Savings & Packages -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.666, 0.781, 0.678] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6077", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_3ace967f-2791-4a0e-87d1-c514a29195a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.474, 0.32, 0.488] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6078", "image": {"bytes": "", "path": "./images/020bc054-a829-4af5-8f0a-6efce012c7ac_104a87a5-25a2-48c5-add0-206e46511d03.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the number 4 ranked board game on the geekmarket.\nPrevious actions:\n[button] Browse -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.124, 0.034, 0.231, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6079", "image": {"bytes": "", "path": "./images/607cea69-abb5-4055-aa09-117650cb7cc9_eb8b8087-d063-4855-90c0-f238e4752bdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request information for tickets for a group of 100 people at a park in california\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Groups \ue92e -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.918, 0.829, 0.938, 0.881] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6080", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_54802f88-fafb-4740-bd79-f5e3717f2733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\n[span] 7 -> CLICK\n[span] 14 -> CLICK\n[button] Done -> CLICK\n[button] Lowest Regular Rate\ue932 -> CLICK\n[label] Senior Discount -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.699, 0.145, 0.914, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6081", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_70e082cb-31c1-4468-a16b-10fe67cce0bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[svg] -> CLICK\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK\n[div] #virtual -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.228, 0.478, 0.478, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6082", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_eaf85801-dbb0-4f5f-bc2f-75832d6dcfb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Most Popular TV Shows -> CLICK\n[link] Horror -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.577, 0.231, 0.601, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6083", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_5d55fb91-fc5c-44ae-b62e-0fc07d2d5bdc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[listbox] hour -> SELECT: 08\n[group] RETURN -> CLICK\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.221, 0.233, 0.256] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_6084", "image": {"bytes": "", "path": "./images/93d0190f-ff39-4b69-82fc-58cddac42006_f1d139be-16d6-448e-836d-4a5043a316d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the safety rating for 2012 Honda Civic\nPrevious actions:\n[link] Price New/Used -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.245, 0.12, 0.271] in the image\nAnd my action is Action: SELECT\nValue: 2012"}]}, {"id": "mind2web_6085", "image": {"bytes": "", "path": "./images/690eedad-706c-4c48-a803-45b4f1c069bb_ca11435d-ef30-4f9f-8a60-fdc777a44ab9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bestsellers under $20 and available in hardback.\nPrevious actions:\n[link] Bestsellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.249, 0.196, 0.273] in the image\nAnd my action is Action: SELECT\nValue: Under US$20"}]}, {"id": "mind2web_6086", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_fa2e4ec5-e583-4ee6-9768-6bc6f7d43822.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.356, 0.375, 0.387] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6087", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_2f60a0c7-f38b-45e6-ab39-8b984c0ecd9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.791, 0.094, 0.932, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6088", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_a091ad8f-b00e-4d24-838b-439f2c89e0c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Mediterranean -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK\n[checkbox] Outdoors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.532, 0.306, 0.554] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6089", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_36a9d86c-7e16-4e62-8351-5bf4f50e8b2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: BIRMINGHAM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.216, 0.553, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6090", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_9f67bc8f-0bad-45c4-b2f4-b3ebbbe9aef6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... -> CLICK\n[p] Orlando Magic at Brooklyn Nets -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Distance\n[combobox] Start Time -> SELECT: 3:00 PM\n[combobox] End Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.322, 0.3, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6091", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_b7a670b5-20f0-4800-b4b3-ffd095b8acd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\n[textbox] Near -> TYPE: SAN FRANCISCO\n[span] San Francisco -> CLICK\n[checkbox] Cocktail Bars -> CLICK\n[checkbox] Outdoor Seating -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.524, 0.122, 0.617, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6092", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_79ecf264-bcba-4974-af90-74b67ca769aa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.719, 0.284, 0.752] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6093", "image": {"bytes": "", "path": "./images/1ba150cb-3b4a-4f0a-bff0-b448c78608ae_a0318c58-8752-4304-9f6b-235154d272b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotel deals in Las Vegas for four adults starting on May 17 and ending on May 20, and if deal is not available, set an alert for the same.\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Hotels -> CLICK\n[textbox] Where? -> TYPE: las vegas\n[div] Las Vegas, NV -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.508, 0.119, 0.697, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6094", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_68bbcc87-382f-446f-b611-bf58f39479cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Fresno -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.13, 0.137, 0.215, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6095", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_f13cc2d8-4952-40bf-a4fb-54be342dfa9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. -> TYPE: 04/22/2023"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.401, 0.29, 0.434] in the image\nAnd my action is Action: SELECT\nValue: 1"}]}, {"id": "mind2web_6096", "image": {"bytes": "", "path": "./images/9e3d2edb-6535-4180-9050-ade88dbf798e_404e24fc-6086-4570-a4aa-a1f1530104a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Chevrolet with lowest mileage in zip 08817 with shipping charges of 99 dollars or less, with a price between 20k to 30k.\nPrevious actions:\n[button] $10,000 -> CLICK\n[menuitem] $20,000 -> CLICK\n[button] $56,000 + -> CLICK\n[menuitem] $30,000 -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.165, 0.36, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6097", "image": {"bytes": "", "path": "./images/0fc98662-4405-4a30-9854-9301c98fdb37_9ce52b41-222b-432a-93fc-3a3050e800b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the parking options in Florida to book to enter on may 6 at 5 pm and leave the same day at 6pm?\nPrevious actions:\n[textbox] Search for parking -> TYPE: florida\n[li] Florida, USA -> CLICK\n[textbox] Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.376, 0.228, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6098", "image": {"bytes": "", "path": "./images/10e17667-f9cf-4a68-adcd-ad2677c3f385_a725013c-1fb2-44f4-b17c-66f001302852.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find series 1 and series 2 BMW vehicles in the store nearest to 07055\nPrevious actions:\n[button] Make -> CLICK\n[span] (954) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Series -> CLICK\n[listitem] 1-SERIES (8) 1-SERIES (8) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.317, 0.253, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6099", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_e23e2c2a-cf6c-45ff-8920-f0444ffee944.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.452, 0.341, 0.591, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6100", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_d953665b-60d3-4f3b-a12b-d55b929baddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[link] Choose Another Hotel -> CLICK\n[button] Choose your room -> CLICK\n[button] Book Double Bed - Standard Room -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.896, 0.273, 0.977, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6101", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_a4c79037-e990-4a7a-9d4a-c0f8936dba07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Consoles & Hardware chevron_right -> CLICK\n[button] Xbox One -> CLICK\n[link] Filter -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.272, 0.375, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6102", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_fb5b3b82-7410-4d95-b743-5441b8e24ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.006, 0.204, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6103", "image": {"bytes": "", "path": "./images/884a375b-e3f4-4f34-8e99-290f49141f9a_b8a690b2-a54c-43a4-9e9e-85a85f00eee7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dog bed on Amazon that is machine-washable and has a diameter of at least 30 inches.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.005, 0.651, 0.024] in the image\nAnd my action is Action: TYPE\nValue: dog bed 30 inches"}]}, {"id": "mind2web_6104", "image": {"bytes": "", "path": "./images/37c09901-63d4-4194-8a96-1d87ca8c37ae_50bd371f-78a5-443f-a626-2689e0c84de9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse washing machines available for free local pickup within 25 miles of zip code 90026.\nPrevious actions:\n[combobox] Search for anything -> TYPE: washing machine\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.729, 0.087, 0.749] in the image\nAnd my action is Action: TYPE\nValue: 90026"}]}, {"id": "mind2web_6105", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_78d5767c-9755-4037-a6cc-b9395a07ba99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.545, 0.95, 0.584] in the image\nAnd my action is Action: TYPE\nValue: the home of joe bloggs"}]}, {"id": "mind2web_6106", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_912aaece-7d84-4401-bda8-02aa5289da82.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.174, 0.782, 0.207] in the image\nAnd my action is Action: TYPE\nValue: professional boxing"}]}, {"id": "mind2web_6107", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_4f9882ac-080d-4657-a4ff-47696c6a4b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.433, 0.451, 0.475] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6108", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_5700b7d0-ce16-4fb9-b77f-0546c08c8568.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.095, 0.713, 0.108] in the image\nAnd my action is Action: TYPE\nValue: jakarta"}]}, {"id": "mind2web_6109", "image": {"bytes": "", "path": "./images/1943febc-0635-4124-871c-578c4d9f5a44_62f82d6a-0799-49e5-9b06-3de4294ea2e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular guitar tab for Absolute Beginners.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.142, 0.039, 0.176, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6110", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_13a2c4e4-2eed-443e-9c1b-f9158831bce5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[textbox] (###) ###-#### -> TYPE: 888888888\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Email Address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Employer Name -> CLICK\n[textbox] Employer Name -> TYPE: Gua AB"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.883, 0.73, 0.934, 0.754] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6111", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_bc0ec628-85e4-4548-9c22-79e966b51ed2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.077, 0.327, 0.101] in the image\nAnd my action is Action: TYPE\nValue: BRISTOL"}]}, {"id": "mind2web_6112", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_7e717da9-b333-49a6-a9bd-b2f045d69cc7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.935, 0.092, 0.977, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6113", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_6d39e04d-cc1b-4633-9459-350a37def42a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.276, 0.015, 0.284, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6114", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_6b277741-9a89-48d0-9635-b0323bb1270d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK\n[button] Search -> CLICK\n[div] $141 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.643, 0.354, 0.728, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6115", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_baed0e46-faa0-4f15-aa46-f27dd88f6e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.279, 0.384, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6116", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_4164e2f1-5ef8-43d2-bb38-176244354c7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.854, 0.004, 0.887, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6117", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_06821f17-f373-4854-9f11-fdf64b7a44f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.157, 0.266, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6118", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_6cf0b91a-fc8d-4494-a0c8-fb11ed928aaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[link] WOMEN -> HOVER\n[tab] Innerwear & Underwear -> CLICK\n[link] Bras -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.23, 0.585, 0.277, 0.602] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6119", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_74e72084-66fc-48b6-adb1-1795475571ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[link] Group Tickets -> CLICK\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.047, 0.777, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6120", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_ad0e2c07-6812-4507-8b98-f82b0d619fd6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[span] Round-trip -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.244, 0.137, 0.432, 0.169] in the image\nAnd my action is Action: TYPE\nValue: new york"}]}, {"id": "mind2web_6121", "image": {"bytes": "", "path": "./images/b3fbf029-aa63-4dd1-879f-47d8abbae4d4_a3caa31c-759f-4764-ba35-39db38cc3e33.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an attorney for divorce in Union City, NJ.\nPrevious actions:\n[link] Attorneys -> CLICK\n[input] -> TYPE: Union City Nj\n[link] Union City, NJ -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.423, 0.216, 0.575, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6122", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_da34e6ef-01dc-47ce-8f12-3a771d0ad4be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK\n[heading] Color -> CLICK\n[label] BLACK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.169, 0.473, 0.188] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6123", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_cabd36a8-0a4b-43c8-a930-64ae46695583.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] 25 -> CLICK\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.3, 0.74, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6124", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_513911cb-10e9-44d7-9254-252734b92b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) 
-> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK\n[checkbox] 1 Stop (49) -> CLICK\n[checkbox] Seat choice included -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.226, 0.048, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6125", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_72aebfb6-ea05-4023-b171-cd398ebf61b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\n[button] Fastbreak -> CLICK\n[link] Fastbreak Program -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.169, 0.19, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6126", "image": {"bytes": "", "path": "./images/945ac29d-8e65-4e14-8bab-21742ac92a47_84fd0117-d99b-47e0-96ea-08945d9fadb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest bananas at Kroger\nPrevious actions:\n[img] -> CLICK\n[textbox] Search Kroger... -> TYPE: bananas\n[span] bananas -> CLICK\n[span] Best match -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.816, 0.132, 0.963, 0.15] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6127", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_e8584aab-e315-4f72-b91f-bf7e76a7d1b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK\n[searchbox] Search -> CLICK\n[option] United States of America -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.502, 0.32, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6128", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_f8cc67b4-274d-4aeb-9012-d1e307deb997.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.143, 0.008, 0.189, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6129", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d6cb8acb-5af8-4ed1-9b71-eb36c19e63dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK\n[link] 2-10 min -> CLICK\n[link] To listen to -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.473, 0.212, 0.492] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6130", "image": {"bytes": "", "path": "./images/fbaa5c83-11ed-40d8-b5e6-d4ddb9a1cbde_baeb278e-2713-42b7-9253-d5c13138436f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for comedy shows taking place in Chicago, IL and expand the results once.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.367, 0.266, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6131", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_d2be2e1a-e5ba-44a6-bbee-83c29f97f07a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.198, 0.887, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6132", "image": {"bytes": "", "path": "./images/d0d6b0ed-82e4-4ba7-9a9b-7b874dacf19c_b6a55c9d-ad1f-4ef5-aca1-093ccb6731d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of Cleveland's animal shelters.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Cleveland -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.11, 0.921, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6133", "image": {"bytes": "", "path": "./images/4a4ecf18-e7a3-448a-b8cb-b337ee20048d_7f6709ef-e981-466c-93ef-0fca08b49eba.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 3 in Los angeles for diner at 09 PM on Mar 10\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.062, 0.347, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6134", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_a4d6ed68-2cee-458e-92d7-a10c85cf0636.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.308, 1.0, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6135", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_e6d16748-7363-4ea8-88d5-d84d200ed602.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK\n[textbox] Order number -> TYPE: 24124124091"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.389, 0.872, 0.441] in the image\nAnd my action is Action: TYPE\nValue: boobear@gmail.com"}]}, {"id": "mind2web_6136", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_636947f5-7244-4149-8c6b-21830f9574ae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[div] Apr -> CLICK\n[div] Choose date -> CLICK\n[generic] 5 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.862, 0.475, 0.922, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6137", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_19c1d885-fd92-43a9-b9a6-091054ce4e46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[textbox] Depart date please enter date in the format dd spa... -> CLICK\n[gridcell] 28 May 2023 -> CLICK\n[button] Continue -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Increase number of infant passengers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.658, 0.298, 0.93, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6138", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_1298e843-2ed2-4cff-a3bb-95cf25587d71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[link] Fishing -> CLICK\n[menuitem] View All -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.753, 0.089, 0.766] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6139", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_e94d39fa-877b-4289-81f9-0762467b1315.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.048] in the image\nAnd my action is Action: TYPE\nValue: m s subbulakshmi"}]}, {"id": "mind2web_6140", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_83ac91e9-e83c-49e2-aa02-8a6085f50d84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] 2 travelers -> CLICK\n[img] Increase children -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.133, 0.828, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6141", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_6b1edaaf-3328-41f0-a8dc-462e1d2cb8c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\n[textbox] Origin -> TYPE: Calgary\n[div] Calgary -> CLICK\n[textbox] Destination -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.352, 0.532, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6142", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4a795c4f-8cd7-4d8b-8dfb-747268abf852.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Destination -> TYPE: new delhi\n[menuitem] New Delhi, Delhi, India -> CLICK\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.404, 0.943, 0.411] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6143", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_ed843127-7acc-4756-96f7-ef0177b3e64a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK\n[generic] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.14, 0.953, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6144", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_fd35e11d-5eb9-48c1-a193-bf04d51813b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Veterinarians -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.007, 0.789, 0.031] in the image\nAnd my action is Action: TYPE\nValue: hawaii"}]}, {"id": "mind2web_6145", "image": {"bytes": "", "path": "./images/c55b9949-b785-4d9e-8b20-b626cb595623_74bf98d9-7598-4103-8a25-8fb7859700e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me events in Phoenix for the next 3 days.\nPrevious actions:\n[button] CITY GUIDES -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.66, 0.238, 0.782, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6146", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2b69e7d2-66ed-486b-8ae2-2e763e1f7d6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM\n[button] Select Pick-up Location -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.798, 0.122, 0.928, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6147", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_0b060218-50cb-4545-bf81-04f57be2db97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Surname -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.243, 0.631, 0.276] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_6148", "image": {"bytes": "", "path": "./images/2a8ae104-6f06-47cb-80a0-045188125868_fc3816cd-1221-4d65-a475-ef22b1771303.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Display details of new lanched iPad pro 11-inch\nPrevious actions:\n[link] iPad -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.281, 0.081, 0.343, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6149", "image": {"bytes": "", "path": "./images/8eef04e2-3d51-4a0e-84f2-db017a69050b_564cb934-0518-4171-9ef4-ddc0e0d42251.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the Promo Code for Spring Break Savings and packages.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.089, 0.348, 0.457, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6150", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_05ba30e9-0691-4d0c-9307-8af2746cc476.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.198, 0.054, 0.261, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6151", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_d471e93f-d8de-4dd3-8d20-e0d660259ade.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[li] Wedding -> CLICK\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.387, 0.648, 0.4] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6152", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_1148e403-4327-47a0-ba61-c781b3c53813.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] Selected Pick-Up Date 03/31/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.485, 0.059, 0.497] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6153", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_7dd29122-5aa7-4e40-a2a3-6193dd6eb05d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Flights & Car Rentals -> CLICK\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.267, 0.772, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6154", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_95bdb2f9-afcd-4088-bb07-12fb2b494992.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.2, 0.759, 0.234] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_6155", "image": {"bytes": "", "path": "./images/b4872f0e-9d9e-4259-8b1e-844509b85712_82cc8845-bf93-4eeb-bf4b-56ec11926ae4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all campgrounds located in California.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK\n[link] FIND BY STATE/PROVINCE \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.102, 0.538, 0.154, 0.548] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6156", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_e2ae9f11-253a-4887-856a-20a5f2a77659.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.511, 0.448, 0.552] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6157", "image": {"bytes": "", "path": "./images/04d45cb8-136b-419a-a324-dfbc5e93b7a2_af730851-81f0-4bb7-b065-a2c06a4d7121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show activities in Miami with lowest price and minimum 4 star rated.\nPrevious actions:\n[path] -> CLICK\n[combobox] Search by city or activity -> TYPE: Miami\n[generic] United States -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.319, 0.686, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6158", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_c04a9026-e147-4c97-8589-5ef46bd0f224.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[select] Select Pick-up Time -> SELECT: 04:00 PM\n[input] -> CLICK\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.755, 0.475, 0.927, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6159", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_8c2ccd6f-96dd-45ff-821e-eb2dc0a30b49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Deals -> CLICK\n[link] Limited Time Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.305, 0.72, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6160", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_56ae15c7-ae7b-4d02-aa81-ade2de73778c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.609, 0.477, 0.715, 0.521] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6161", "image": {"bytes": "", "path": "./images/330d5618-9db4-447b-9b56-0d2c33f414d5_a7fa1b89-a997-48b0-9e33-4f34fcca5f69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the full menu for AMC dine-in locations.\nPrevious actions:\n[link] Visit the Food & Drinks page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.285, 0.087, 0.488, 0.102] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6162", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_0f57acd6-e046-4943-9760-1aa47a966503.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK\n[searchbox] Search by keyword -> TYPE: women t-shirts\n[div] WOMEN / Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.341, 0.336, 0.36, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6163", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_c43e32b7-d7af-4d60-b11f-d2f9c45da006.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Shoes & Sandals -> CLICK\n[link] Athletic Shoes & Sneakers Athletic Shoes & Sneaker... 
-> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.321, 0.974, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6164", "image": {"bytes": "", "path": "./images/91e18ec8-5eae-4b14-a49e-9ed7ee1ebb10_4d90c658-8e5c-4f33-abda-abb115083116.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of playstation 5 games available for pre-orders.\nPrevious actions:\n[button] Main Menu -> CLICK\n[button] Video Games chevron_right -> CLICK\n[button] PlayStation 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.129, 0.094, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6165", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_2ed0e2bc-5efb-4f91-af60-ce5031a71a68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK\n[link] HOTEL INTERNSHIPS INTERNATIONAL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.085, 0.465, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6166", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_59dcc93a-860e-48a6-8b81-8097c3fee4ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[link] Careers This link will take you away from the Qata... -> CLICK\n[span] 64 -> CLICK\n[textbox] Location -> TYPE: india"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.133, 0.603, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6167", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_9bd8b189-7a5b-4d0e-96ef-ac97d7b147af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[textbox] Drop-off location -> TYPE: new york\n[span] New York, United States -> CLICK\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.148, 0.859, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6168", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_e9e59734-447c-445e-bdb7-bea4db2729a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.447, 0.512, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6169", "image": {"bytes": "", "path": "./images/d9e9c178-e945-49fe-9d06-a01cf02ab2db_d03e7438-57ba-4030-84f8-8f933491cd6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the highest rated spas that are $$ dollar signs and under.\nPrevious actions:\n[textbox] Find -> TYPE: spa\n[span] Spa -> CLICK\n[button] Price: -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.238, 0.209, 0.256] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6170", "image": {"bytes": "", "path": "./images/8082086a-10a7-4631-9792-b57337426f9a_88037d8e-d35e-43e7-b65b-3effca4aaeee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a SUV with automatic transmission in New York drop off at March 30.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: New York\n[div] New York, NY -> CLICK\n[div] Sat, Apr 1 -> CLICK\n[checkbox] 30 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.164, 0.923, 0.216] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6171", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_fddf88e2-6ba5-4b77-94a6-aa4c1b5c0c67.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Add to playlist -> CLICK\n[a] Create a playlist -> CLICK\n[textbox] Playlist title * -> TYPE: Love\n[button] Save -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.653, 0.456, 0.673] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6172", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_d2cd0379-7409-47f4-aeb6-8c3b3a889a8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[button] Open Menu -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.525, 0.285, 0.55] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6173", "image": {"bytes": "", "path": "./images/8710addc-5ff3-4aaf-b397-4c6165f285ee_78a8a34b-3cf0-4509-b428-953fd4f0c3de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the service options for cars under warranty.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.008, 0.384, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6174", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_7ea26b28-5c62-473d-b458-360617fef404.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK\n[span] From $62 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.176, 0.136, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6175", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_4c5b26b1-21bb-4ba7-a996-a9609b832e1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.301, 0.691, 0.347] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6176", "image": {"bytes": "", "path": "./images/e7fbd3a3-d583-46b9-ad7e-3f7b765fc311_519ead86-64a8-4df9-a1d2-6bd89a9f8f54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check cheap flights from NYC to London on 23rd of April for students over 18 years.\nPrevious actions:\n[link] Search for flights -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[button] 1 adult -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.132, 0.532, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6177", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_eb57cac8-928c-4777-8c81-103790610108.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.205, 0.032, 0.28, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6178", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_9be7b356-89b9-40c0-827e-a23d85da1644.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[img] GIFT CARDS -> CLICK\n[img] Happy Birthday -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.453, 0.916, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6179", "image": {"bytes": "", "path": "./images/e9300d50-11fa-4f98-8c39-424630668ab9_66a995c0-14dc-4dc8-8e8f-adfdd0247b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the popular online Health events for tomorr\now?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.419, 0.939, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6180", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_bc650861-2931-44a1-8ee6-9a22468604df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.096, 0.52, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6181", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_ca8df4fe-9e2b-49ea-9eb4-cb71f99749a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[button] APPLY -> CLICK\n[span] Magenta -> CLICK\n[button] APPLY -> CLICK\n[span] Single Pack -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.232, 0.4, 0.254] in the image\nAnd my action is Action: SELECT\nValue: Lowest Price"}]}, {"id": "mind2web_6182", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_a3741eaf-81d7-4424-8aa6-4171091b1faf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. 
Billing address is in New York, zip code 10001\n.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.263, 0.044, 0.366, 0.07] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6183", "image": {"bytes": "", "path": "./images/afb693cd-57cb-4468-9f7f-d965ee530913_4ff311df-0d3b-4e91-aa58-1fa0219c8834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the discount black insulated hunting boots for Men with average ratings over 3.5 stars\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.119, 0.941, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6184", "image": {"bytes": "", "path": "./images/caafd610-202e-49d2-85d1-3f167f3ab443_8ca7bf3c-75bf-4fbc-80d1-7d527c476669.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the map of a Six flags park in mexico\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags M\u00e9xico -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.12, 0.844, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6185", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_96c2ecf0-d98a-4fd3-af03-4eefb8ccf225.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corrola, L Sedan 4D. My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[button] Select Vehicle -> CLICK\n[button] No thanks -> CLICK\n[textbox] Other Ways We Can Help -> TYPE: ac recharge\n[button] See Pricing -> CLICK\n[button] Get Service Estimates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.55, 0.557, 0.575] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_6186", "image": {"bytes": "", "path": "./images/2089ee5c-1ccd-495e-9a80-f62e129554ed_7c725110-9b24-416e-bf34-1566ee5fd7a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the deals in California\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.043, 0.266, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6187", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_0392b523-ff32-4400-98ff-9da00b7cda72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.013, 0.873, 0.052] in the image\nAnd my action is Action: TYPE\nValue: blazer"}]}, {"id": "mind2web_6188", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_4058fdcb-00c9-479e-a343-0bf9db5ff23e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK\n[DisclosureTriangle] All Dates -> CLICK\n[li] This month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.484, 0.179, 0.553, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6189", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_9c70ec38-dd91-4342-a324-41ede6034a26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[label] BLACK -> CLICK\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... -> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.376, 0.491, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6190", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_2da19bbe-dd62-482e-bbf3-24f0ecc52e72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.329, 0.404, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6191", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_5d534eb2-9235-4e29-9b92-955b87be94bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle\n[option] Seattle (WA), United States (City) -> CLICK\n[button] Next Month -> CLICK\n[span] 11 -> CLICK\n[span] 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.471, 0.419, 0.522, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6192", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_4230e0d5-3c05-4f5e-a84b-380081e7d025.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK\n[textbox] Guest rooms -> TYPE: 1"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.407, 0.648, 0.432] in the image\nAnd my action is Action: TYPE\nValue: 7"}]}, {"id": "mind2web_6193", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_c3faae7a-eb45-4287-a15e-dc3226ffb69f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\n[input] -> TYPE: beauty salons\n[link] Beauty Salons -> CLICK\n[input] -> TYPE: Seattle, WA\n[link] Seattle, WA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.046, 0.788, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6194", "image": {"bytes": "", "path": "./images/8b39c9ac-a965-4902-a7d9-ec7fb36ca096_89a10228-542e-43ea-be51-914770f17ff5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of a flight between San Antonio, Texas, and Bellingham, Washington on April 8.\nPrevious actions:\n[textbox] From , required. -> TYPE: san antonio\n[a] SAT - San Antonio International, TX -> CLICK\n[textbox] To , required. -> TYPE: washington\n[a] WAS - Washington, DC -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.219, 0.843, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6195", "image": {"bytes": "", "path": "./images/c5871c3f-27c3-48d8-bdf9-8b49382a2719_a1e01f2e-743a-423b-b3b2-8c89b8775b7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 movies from At the Kiosk which are new to Wishlist.\nPrevious actions:\n[link] Navigate to at-the kiosk -> CLICK\n[link] Navigate to New Releases At The Kiosk See More -> CLICK\n[img] Plane (2023) -> CLICK\n[button] Click to add title to wishlist. -> CLICK\n[link] Navigate to at-the kiosk -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.057, 0.162, 0.085, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6196", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_b2e40a74-71d8-4594-963c-04d6c99d9924.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.142, 0.327, 0.176] in the image\nAnd my action is Action: TYPE\nValue: MILAN"}]}, {"id": "mind2web_6197", "image": {"bytes": "", "path": "./images/afa1f70c-c0ff-4e4d-a5c2-0c0d292ee066_9d29bcf6-38dc-4fc3-b54b-c79ad0c7b672.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find cruise deals to Europe this summer that includes airfare and are all-inclusive.\nPrevious actions:\n[link] Cruises -> CLICK\n[textbox] Where? 
-> CLICK\n[div] Europe -> CLICK\n[textbox] When? -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.078, 0.58, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6198", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_cdeb8a94-6f22-4f9a-9224-861a9f9518c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK\n[generic] Refine by Category: Figures -> CLICK\n[link] $0 - $10 $0 - $10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.322, 0.375, 0.371] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6199", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_a709ab3a-f10a-4d4c-adda-404e37e3755d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Darien Lake -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.287, 0.844, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6200", "image": {"bytes": "", "path": "./images/82c5913d-8392-44a2-8873-6627a281fa23_731f1c88-2e9c-40af-a9a2-5cbb8486771a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate monthly payment for car value of $50,000 and downpayment of $10,000 with interest rate of 4% for 60 months.\nPrevious actions:\n[menuitem] Finance -> HOVER\n[menuitem] Loan Calculator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.188, 0.473, 0.213] in the image\nAnd my action is Action: TYPE\nValue: 50000"}]}, {"id": "mind2web_6201", "image": {"bytes": "", "path": "./images/fca09c13-5e6c-49d5-b3ee-b620a70b19f5_e33e7423-a005-4de8-89dc-d34c5f297820.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with lowest price in Tokyo for 2 adults and 1 child for 10 May and checkout on 12.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[option] Top destination Tokyo, Japan (City) -> CLICK\n[div] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.393, 0.377, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6202", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_4e0c7759-42dd-49e7-b8f0-c1a71191be69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.285, 0.359, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6203", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_33320628-6d12-4948-a068-aad951d8eab1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.118, 0.786, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6204", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_743b5d19-6618-4851-8d60-aff7605fc7d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.121, 0.296, 0.255, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6205", "image": {"bytes": "", "path": "./images/af97084c-5b72-4fec-be1e-dcab0980b335_bc8a6490-c12c-4d18-bed0-e0a9652265d7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check all Bayern Munich goalie stats\nPrevious actions:\n[button] MORE -> CLICK\n[div] Soccer -> CLICK\n[div] Bundesliga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.181, 0.401, 0.469, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6206", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_d4f72df1-7bad-4682-801b-1306a7dbf865.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[option] Niagara Falls, ON, Canada -> CLICK\n[button] \ue9571 NIGHT Sun, Apr 09 - Mon, Apr 10 -> CLICK\n[span] 17 -> CLICK\n[span] 20 -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.104, 0.188, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6207", "image": {"bytes": "", "path": "./images/81fb481b-a234-4e25-b494-9ed26a03e977_0d8093b0-56b9-45e6-b9c9-b8d9c0f501cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the second most popular horror series to my watchlist.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.106, 0.628, 0.122] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6208", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_18dc8214-4872-434c-876a-f628e23fcfc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 1, 2023 -> CLICK\n[button] Jul 7, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.304, 0.568, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6209", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_3f5e842c-d368-42bb-a2c6-1407fa5b61d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Star Wars\n[button] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.422, 0.653, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6210", "image": {"bytes": "", "path": "./images/fbe9f625-7b47-4e13-a2f5-6823195d7438_76121b93-9b56-4b51-86a1-c62104d6fb48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a cheapest direct flight From mumbai to new delhi, India, on june 1 and return travel on june 8.\nPrevious actions:\n[tab] Flights -> CLICK\n[generic] Round-trip -> CLICK\n[combobox] Flying from -> TYPE: MUMBAI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.338, 0.476, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6211", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_25b10621-3099-4546-9ff0-dd74ac022908.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.071, 0.482, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6212", "image": {"bytes": "", "path": "./images/984f5bdb-d6d0-4b9d-ae23-bee8283a4f40_b066ef7b-a2bc-40b2-941a-8aeae8f79bf8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop 2-5 day cruises to the Caribbean from Miami during August 2023.\nPrevious actions:\n[link] 2-5 DAY CRUISES Get big savings! Check out 2-5 day... -> CLICK\n[button] Sail To -> CLICK\n[button] Caribbean -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.075, 0.491, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6213", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_1680dfb8-7555-457e-916a-b744dd50ccb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[button] hotels -> CLICK\n[div] Destination or property -> TYPE: jakarta\n[hp-input-button] Destination or property -> TYPE: jakarta\n[div] Jakarta, Special Capital Region of Jakarta -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.151, 0.273, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6214", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_0a1e66bf-415c-4c64-a0c9-7fe592135fec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Collectibles -> CLICK\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.198, 0.923, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6215", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_94ea9c63-2eec-4898-9bd9-b1155e31d79e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.852, 0.551, 0.944, 0.57] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6216", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_2c586599-8f29-41ca-a0b1-87e1e3789284.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. Enter to change collapsed list. Selected ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.363, 0.187, 0.637, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6217", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_b4e902e0-1823-4a2d-82d1-e4cb17411a3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\n[button] Community -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.052, 0.615, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6218", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_84c56bf6-9bbd-44d5-bcd2-ec8a1a549af6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.638, 0.107, 0.691, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6219", "image": {"bytes": "", "path": "./images/dc636898-246e-4b08-9978-6a6dc1b20320_476b5f29-c0de-40d0-8f3d-2ce384cd591c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show mattresses available with Amazon Basics and Follow the store.\nPrevious actions:\n[link] Amazon Basics -> CLICK\n[link] BEDROOM -> CLICK\n[link] Mattress & Mattress Toppers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.096, 0.278, 0.168, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6220", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_ec1bbbbe-ce3c-4e80-8b4a-5549ce2cb133.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Carribbean\n[button] \ue9571 NIGHT Wed, Apr 19 - Thu, Apr 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.224, 0.88, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6221", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_df0d8978-9049-4cb9-968c-6df0e0da3812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.367, 0.286, 0.386] in the image\nAnd my action is Action: SELECT\nValue: Good"}]}, {"id": "mind2web_6222", "image": {"bytes": "", "path": "./images/998d121b-c858-485d-9dd3-4609575d144b_3105db36-2d46-422c-990a-31de39ab0a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular song this week by the no. 1 weekly charts ranked artist\nPrevious actions:\n[link] Charts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.181, 0.145, 0.195] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6223", "image": {"bytes": "", "path": "./images/f7b93dc1-2f19-419e-a6e0-a701008c17fa_fcf8a62d-5909-423d-b5d8-241e0adb4dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the booking with ticket 123456 under the name James Buck.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.437, 0.306, 0.623, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6224", "image": {"bytes": "", "path": "./images/0b59dd33-7f6a-48df-aa1e-9cc67177287f_9164d8b5-1f9c-401b-ac02-e2235a798755.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking spot near Dallas Love Field Airport.\nPrevious actions:\n[tab] Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.06, 0.784, 0.076] in the image\nAnd my action is Action: TYPE\nValue: Dallas Love Field"}]}, {"id": "mind2web_6225", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_09ad252d-0aa9-4500-9c76-a6f428acfcae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] Select Maximum Year -> SELECT: 2023\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.032, 0.35, 0.226, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6226", "image": {"bytes": "", "path": "./images/6b627cbc-a45e-4f7e-9d02-bfca1a41070a_8db68fdb-b4d0-4883-ba65-f2ecbb8ac59b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View details for a Times Square parking lot that is wheelchair accessible.\nPrevious actions:\n[textbox] Search for parking -> TYPE: Times Square\n[li] Times Square, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.36, 0.384, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6227", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_4b43afb6-5cfb-4405-95c9-4fefabda58be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\n[combobox] Pick-Up Time Selector -> SELECT: 11:00 AM\n[button] Date -> CLICK\n[button] 03/29/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 1:00 PM\n[button] Browse Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.625, 0.059, 0.649] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6228", "image": {"bytes": "", "path": "./images/d5054276-2223-44f3-b2ed-4944bae4d2b1_bff2b2ce-8a02-4c9e-8262-f76461b7f237.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Hotel in Boston zip code 02199 and make a pick-up reservation in a suv with gps for 2 people on March 27 11 am and return on March 29, 1 pm.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.025, 0.499, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6229", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_f34ec95c-8c93-4e9a-8b49-69c0eaee86e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: AUSTIN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.258, 0.582, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6230", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_a40a0d44-e057-46e9-98bc-cf21b715bbb6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[combobox] TIRE_CONDITION -> SELECT: Good To Go\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.476, 0.284, 0.513] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6231", "image": {"bytes": "", "path": "./images/632bb279-036c-48e5-b40a-962b2e90d6d1_1aefcac7-5379-4ce3-b57c-ee32805a47e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse winter coats featured in the weekly ad and is at least 15% off.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: winter coat\n[button] go -> CLICK\n[button] Deals -> CLICK\n[div] -> CLICK\n[button] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.619, 0.112, 0.692, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6232", "image": {"bytes": "", "path": "./images/7a698566-2a8f-4d9d-9da9-17288b66917f_2eacf5f3-80a5-491f-9d67-5d5793a8d030.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hotels in Tokyo for two adults and one child, with a budget of $500 per night for 2 nights starting on May 5th.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: Tokyo\n[generic] Japan -> CLICK\n[div] 5 -> CLICK\n[div] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.303, 0.393, 0.309] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6233", "image": {"bytes": "", "path": "./images/4770e887-f523-4609-a989-ded8c8abad19_ac59711d-fcaa-4057-92af-1038cdc97b7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip economy flight from Pune to New York in June and select the fully refundable.\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: Pune\n[button] Pune, IN (PNQ) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.202, 0.478, 0.229] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6234", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_05079796-ff63-4353-b6f4-58469eeb7be8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[textbox] Where to? -> TYPE: India"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.173, 0.729, 0.206] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6235", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a6a47a5f-af4c-4828-877e-98c2bec76ac6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK\n[link] Explore Destinations & Travel Requirements -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.068, 0.472, 0.207, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6236", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_7457cb3f-1727-46f9-ba3c-c6fa1f567265.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.326, 0.72, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6237", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_6283deae-3b1b-4134-a5c2-b272074c2708.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com\n[textbox] Phone * -> TYPE: 8888888888\n[combobox] Market: * -> SELECT: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.22, 0.787, 0.299] in the image\nAnd my action is Action: TYPE\nValue: 123rd st"}]}, {"id": "mind2web_6238", "image": {"bytes": "", "path": "./images/df73be67-e81a-444f-82cc-e69d54a72734_090caaac-cb79-4e25-b11f-dafd929b8871.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the gift shops available at a park in New York\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.51, 0.846, 0.555] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6239", "image": {"bytes": "", "path": "./images/f84075a2-9d97-4964-9c80-70f5bee8a418_7d12b26e-cea6-48a9-84bb-31a71783af9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find my trip with ticket number 3329456534543 along with my name John Green\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[combobox] Find Your Trip By -> CLICK\n[option] Ticket Number -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.098, 0.562, 0.132] in the image\nAnd my action is Action: TYPE\nValue: 3329456534543"}]}, {"id": "mind2web_6240", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_2c84e548-890b-4c83-bb17-731112429425.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.661, 0.674, 0.916, 0.709] in the image\nAnd my action is Action: TYPE\nValue: debbi.wo@bbt.com"}]}, {"id": "mind2web_6241", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_15edde36-09a1-4143-8fd1-1aa23e4c17fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.793, 0.192, 0.818] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6242", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5dbffc54-e517-4b9b-a93d-8731878ee4e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... 
-> CLICK\n[div] Find a Store -> CLICK\n[link] FIND A STORE -> CLICK\n[textbox] SEARCH BY CITY, STATE, OR ZIP CODE -> TYPE: 43240"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.843, 0.199, 0.991, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6243", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_5f626ce1-8f6c-41da-a606-191bbaf298a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.005, 0.204, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6244", "image": {"bytes": "", "path": "./images/de1045f4-14ce-4de9-9aa8-601315b73b0e_e63bd0c5-1f40-461f-a792-dfe15f095b29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the most popular documentary tv series sorted by IMDB rating, see the details of the top result.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.075, 0.022] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6245", "image": {"bytes": "", "path": "./images/dc2aa3f8-eda0-455b-98dc-adc56089259a_1f10d654-9925-42b4-80dc-1c85e4fc7e9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get route information for flights from Ohio and New York airports that have already arrived.\nPrevious actions:\n[div] Ohio -> CLICK\n[textbox] Destination -> TYPE: New York\n[div] New York -> CLICK\n[img] Submit Search -> CLICK\n[group] \uf067 Status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.368, 0.076, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6246", "image": {"bytes": "", "path": "./images/e0feee24-dfed-454a-aa40-eda244f1d044_d7a061ca-bdc0-46b6-9f53-d3a7eb20cd89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the balance sheet and cash flow statement for the fiscal year 2021 of Six Flags.\nPrevious actions:\n[link] Investors -> CLICK\n[link] Financial Info -> CLICK\n[heading] Balance Sheet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.637, 0.384, 0.675] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6247", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_6530ec2d-af29-4aa3-87d9-1459e0e0aab5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK\n[button] Find Your Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.288, 0.309, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6248", "image": {"bytes": "", "path": "./images/e9a5ab90-517c-4323-a343-6e10e6b9632f_39ddc5fa-ac0b-46c4-97f1-7fda5d38e1d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the gift registry of Carla Cahill, when asked for password use Michael Cahill as name and cahillm@gmail.com as email.\nPrevious actions:\n[link] Gift Registry -> CLICK\n[span] Find a Registry -> CLICK\n[textbox] First Name -> TYPE: Carla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.233, 0.534, 0.257] in the image\nAnd my action is Action: TYPE\nValue: Cahill"}]}, {"id": "mind2web_6249", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_32f50fb0-29e4-45af-ac3a-e2c5e30fd5c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.449, 0.568, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6250", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_27cebec5-d92c-4883-b6f0-9514162b357b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.324, 0.66, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6251", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_c8d98ede-94ff-4686-80c4-d63369045443.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER\n[link] Swimwear -> CLICK\n[link] Women's Plus -> CLICK\n[div] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.014, 0.319, 0.106, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6252", "image": {"bytes": "", "path": "./images/14f5587e-1353-419e-a381-f92d54ea2059_33996c48-9ef9-42e0-9ae2-d73a23df1bee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bio information about Lebron James.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.169, 0.057, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6253", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_4df4d08c-48cb-452b-bd28-c31f36f0c7f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.142, 0.605, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6254", "image": {"bytes": "", "path": "./images/3d7f4f43-cff8-45fa-8249-97a8369c2d1f_287372aa-eea9-4451-9ea6-628052669c61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a 50-dollar home sweet home gift to my friend John with the message Congrats on your new home, and add this card to your cart and checkout.\nPrevious actions:\n[img] -> CLICK\n[textbox] To: -> TYPE: John\n[textbox] From: -> TYPE: James\n[textbox] Message: 200 characters remaining -> TYPE: Congrats on your new home.\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.187, 0.992, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6255", "image": {"bytes": "", "path": "./images/8e721b00-f406-449b-9885-0267b47ecfdb_65871880-9edf-4376-ba5a-724665a1454b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used Jaguar XFs with no black exterior color and save the search as Jaguar to get a notification daily.\nPrevious actions:\n[button] Advanced Search -> CLICK\n[textbox] Ex. Black exterior -> TYPE: black\n[button] Black (Exterior Color) -> CLICK\n[button] SAVE SEARCH -> CLICK\n[textbox] Custom name your search -> TYPE: Jaguar"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.403, 0.604, 0.588, 0.648] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6256", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_08a1e24a-5539-4b20-a56a-a6201abec410.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... 
-> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.266, 0.359, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6257", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_1ca75cb6-4ad0-4233-bba6-b07ccfdec468.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK\n[button] Sort by:Top picks for solo travellers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.163, 0.473, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6258", "image": {"bytes": "", "path": "./images/7f94386a-d032-43cf-9dbe-2b64430c9c28_56c6ec61-144d-4320-836a-4aaa0573ed66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: locate the store in IL\nPrevious actions:\n[link] Store Locator -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.264, 0.959, 0.303] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6259", "image": {"bytes": "", "path": "./images/c73b1674-9c61-46a3-84dd-8fad3622a035_2010968d-7952-46a7-b08a-1b281434fdac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the flight from New York to Miami from Mar 27 to Mar 31 that has the lowest core price in points.\nPrevious actions:\n[span] Search flights -> CLICK\n[button] Continue to flight results -> CLICK\n[link] Sort & Filter -> CLICK\n[svg] -> CLICK\n[label] Points -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.156, 0.336, 0.223, 0.355] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6260", "image": {"bytes": "", "path": "./images/daec5481-187d-4e2f-9dae-a11892cdadf2_6940b55b-a01a-4773-a195-09ade3dfa191.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a journey with return option on same day from Edinburg to Manchester on April 2nd and book the best possible option available.\nPrevious actions:\n[combobox] Departure station, London selected. -> TYPE: edinburg\n[span] Edinburgh (Waverley) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.096, 0.327, 0.119] in the image\nAnd my action is Action: TYPE\nValue: manchester"}]}, {"id": "mind2web_6261", "image": {"bytes": "", "path": "./images/000c2828-e8ce-4b18-bfe0-89ed333add06_80ff8fd1-64c6-4df9-91bd-7478a1730329.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a $25 digital gift card for Tim Stebee, whose email address is scisoorbros@gmail.com. 
Fill in sender name Jeerimiah Waton\nPrevious actions:\n[img] -> CLICK\n[img] GameStop Digital Gift Cards -> CLICK\n[textbox] Recipient Name -> TYPE: Tim Stebee"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.688, 0.24, 0.975, 0.288] in the image\nAnd my action is Action: TYPE\nValue: scisoorbros@gmail.com"}]}, {"id": "mind2web_6262", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_c3667e3c-b19b-44bc-a90e-e55c3a194518.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.02, 0.441, 0.066, 0.45] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6263", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_ef48f3d8-f9e6-40d1-931c-334e477f5a28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[link] CITIES -> CLICK\n[span] Find your state -> CLICK\n[link] New York -> CLICK\n[link] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.179, 0.592, 0.207] in the image\nAnd my action is Action: TYPE\nValue: 66 perry st"}]}, {"id": "mind2web_6264", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_3be9ae7f-a70b-4318-8fb6-4ad2c22b8f3f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.01, 0.481, 0.049] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6265", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_c8ccfecb-f35d-47be-a43b-48928934fcd3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.292, 0.514, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6266", "image": {"bytes": "", "path": "./images/1a16a98f-4e8f-4ff9-9fa2-b97d2d403cc3_5a7f29fc-db45-4eee-8795-c0ab17f04f05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a set of sonoma bath towels to the cart and apply a coupon code for free shipping.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: bath towels\n[span] bath towels -> CLICK\n[img] Sonoma Goods For Life\u00ae Ultimate Bath Towel with Hy... 
-> CLICK\n[link] BATH TOWEL -> CLICK\n[button] Add to Cart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.276, 0.72, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6267", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_19738f5d-7377-4d14-9f1e-8589bd2c655c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.067, 0.784, 0.085] in the image\nAnd my action is Action: TYPE\nValue: jfk"}]}, {"id": "mind2web_6268", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_d5b4d8ea-73a9-4e11-8496-13694222c79b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.013, 0.804, 0.031] in the image\nAnd my action is Action: TYPE\nValue: zyrtec"}]}, {"id": "mind2web_6269", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_d0bc46ae-42b7-4510-949b-2c0c747f8ac3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[textbox] City or ZIP Code -> TYPE: Daytona\n[button] select to search for a kiosk using city or zipcode -> CLICK\n[button] select to browse a kiosk -> CLICK\n[button] Show Filters -> CLICK\n[button] Filter by rent -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.27, 0.141, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6270", "image": {"bytes": "", "path": "./images/3110612f-63fe-4a7a-98d0-29c806d6a34f_cf550759-5d20-4109-8c9a-469f64f2f1e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Trains in Spain guide that will help you learn about Renfe fare types\nPrevious actions:\n[img] header.burgerMenu.title -> CLICK\n[menuitem] European trains -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.607, 0.124, 0.966, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6271", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_d0f6e4c4-1e10-4b80-808a-2e0d70eb0ce0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.105, 0.266, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6272", "image": {"bytes": "", "path": "./images/41cd71cb-cae5-41c8-abe8-67e42fabd4a5_77b1d0a6-ec27-41a0-905a-1fd4d43e01ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the details of Grand National Parks tour.\nPrevious actions:\n[button] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.05, 0.735, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6273", "image": {"bytes": "", "path": "./images/0b16b42c-dea7-4079-a2a8-79eb23447193_2d7f9a07-3428-4891-8d3b-24e22be9e7b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular deals to USA national parks in the southeast region of Biscayne national park, and add to the cart the fully refundable economy flight from New York to Miami with a departure date of May 13 and a return date of May 18.\nPrevious actions:\n[tab] DEALS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.114, 0.39, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6274", "image": {"bytes": "", "path": "./images/ccb7c231-8655-4613-a28f-beb8074c523e_b61e0bec-bcd8-4a74-896a-1014bbf71f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the Under Armour Return Policy for Men's UA Football All Over Print Metal Logo Short Sleeve\nPrevious actions:\n[menuitem] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.177, 0.141, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6275", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_b1259dba-320f-42b0-97a0-41dc930a594a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.028, 0.817, 0.04] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6276", "image": {"bytes": "", "path": "./images/b332a3c6-c4f5-423c-a207-b1bcb28db6c3_60c544de-dc61-44c3-b0f2-0bb17011e3bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated Drop D guitar tab from the 1990s.\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Drop D 39,730 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.585, 0.153, 0.609] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6277", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_c5203087-62da-4044-9189-5a59dd38004b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.248, 0.668, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6278", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_fc6195f2-3260-40a9-a000-5a0d2faf4e98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\n[searchbox] Search -> TYPE: Taylor Swift\n[button] Search -> CLICK\n[link] Taylor Swift -> CLICK\n[button] Follow -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.402, 0.338, 0.489, 0.348] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6279", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_af385238-0c5d-4ce4-bf14-c3ece21aa30c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK\n[button] Apr 2, 2023 -> CLICK\n[button] Apr 2, 2023 selected, current check in date. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.929, 0.336, 0.984, 0.353] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6280", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_d6772d62-36d8-4118-a2d4-d899094404a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.124, 0.362, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6281", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_875c751f-e7b4-444c-b6ba-c3516398869c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[link] Long-Term Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.215, 0.235, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6282", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_ffe3ba82-c9d1-4d63-a501-1525cbd12380.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.467, 0.241, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6283", "image": {"bytes": "", "path": "./images/1b310ebc-d9f4-4eb9-b348-30b329207a36_37ad3e47-c309-4efe-ace6-3208fe05fdb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a dry cleaning shop with the highest rating and virtual consultation option for Ironing and stain treatment for a pair of suits and a few delicates in New York City.\nPrevious actions:\n[link] More -> HOVER\n[span] Dry Cleaning -> CLICK\n[textbox] Near -> TYPE: new york city\n[span] New York, NY -> CLICK\n[button] Virtual Consultations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.111, 0.63, 0.123] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6284", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_bf7c3146-d7ce-4c7e-83ca-1e3ff7b12175.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.346, 0.231, 0.63, 0.242] in the image\nAnd my action is Action: SELECT\nValue: AMC Grove City 14"}]}, {"id": "mind2web_6285", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_e0c7be7c-eb3c-4ce7-b04d-d385aea37cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[button] Locations -> HOVER\n[link] Find a Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.144, 0.657, 0.171] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_6286", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_374ff5ac-f7ac-41b8-9db1-62af14e7b4a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.103, 0.325, 0.118] in the image\nAnd my action is Action: TYPE\nValue: Carribbean"}]}, {"id": "mind2web_6287", "image": {"bytes": "", "path": "./images/5f09e15c-c987-4bdb-ab6e-5db39b18317d_692ed3d3-325b-412e-bc50-f2c834c7c4c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 3 tickets for a Special Event or Experience in Miami on may 4\nPrevious actions:\n[textbox] Search restaurants, cuisines, etc. -> CLICK\n[button] View all Miami Restaurants \u203a -> CLICK\n[path] -> CLICK\n[link] Thursday, May 4th | American Express Presents CARB... -> CLICK\n[combobox] 2 tickets for Thursday, May 4th | American Express... -> SELECT: 3 Tickets"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.328, 0.162, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6288", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_4d36a13d-82ba-46ee-8587-497ec99d0638.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO\n[span] TYO -> CLICK\n[span] Where to? 
-> TYPE: NEW DELHI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.392, 0.308, 0.648, 0.346] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6289", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_ef25943f-68a6-4969-91d1-956e78f70336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] T-Shirts -> CLICK\n[gridcell] Price -> CLICK\n[label] $40-$60 -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.31, 0.491, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6290", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a008a7f6-6480-487f-abda-be44b38c3d47.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[textbox] Zip Code -> TYPE: 59901\n[button] Search By Zip Code -> CLICK\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.187, 0.177, 0.368, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6291", "image": {"bytes": "", "path": "./images/62806bef-eeeb-4892-b4d5-6a8d2005c58d_8a105984-8971-4dbe-8929-a49933e300de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the trending searches in Columbus.\nPrevious actions:\n[link] CITY PAGES -> CLICK\n[link] Columbus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.343, 0.588, 0.379] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6292", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_42238faf-5676-4bfc-8fb1-4c18741ce175.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.3, 0.969, 0.598] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6293", "image": {"bytes": "", "path": "./images/8b4e49e9-9802-4c1f-acb3-de06e38445e7_0a3ab473-d734-47eb-9710-22e03410d4f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find beauty salons that are rated A and accept coupons in Seattle, WA.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.138, 0.406, 0.167] in the image\nAnd my action is Action: TYPE\nValue: beauty salons"}]}, {"id": "mind2web_6294", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d838fd6f-80b5-45fc-8388-4038eb8363ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[link] Shopping bag, 1 items -> CLICK\n[span] Continue to checkout -> CLICK\n[span] Guest checkout -> CLICK\n[span] Select a store -> CLICK\n[span] IKEA West Chester -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.015, 0.988, 0.053] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6295", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_48bf6e5e-9a49-4d59-a377-26dcf4a830f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Change location -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: CHICAGO\n[span] Chicago -> CLICK\n[span] Mar 18 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.438, 0.448, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6296", "image": {"bytes": "", "path": "./images/04b8b406-a031-48a3-a002-b1791d872e16_6ed1aa8a-c227-4ee2-8dfb-d04fe3d3fdb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add $100 Google Play Gift Card to cart and recipient email address as abc@abc.com\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Google Play\n[button] google play gift card -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.128, 0.393, 0.275] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6297", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_24d287e5-d848-4bd8-bacd-eb06b15a0036.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK\n[link] Warner Theatre Theaters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.266, 0.486, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6298", "image": {"bytes": "", "path": "./images/bf159a0f-e6d7-46fa-beea-c231934fa7a9_9bbb8418-648f-4efb-a31e-9cb314c075be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find travel agents in Grand Junction, Colorado\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Search the site -> TYPE: travel agent\n[button] Find -> CLICK\n[link] Find A Travel Agent | Carnival Cruise Line -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.503, 0.494, 0.545] in the image\nAnd my action is Action: TYPE\nValue: Grand Junction"}]}, {"id": "mind2web_6299", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_7b459c84-10b1-4039-8d5e-815757741f7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\n[link] We're hiring! Join our team , Opens another site i... -> CLICK\n[button] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.529, 0.341, 0.65, 0.361] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6300", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_3c16c075-2c0d-4f6b-8239-27d144b4b7bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[label] 9 -> CLICK\n[label] Available in Store -> CLICK\n[label] KEEN -> CLICK\n[svg] -> CLICK\n[label] .Stars::before { -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.274, 0.9, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6301", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_1533426c-6f64-4fc5-aa01-7fad60360f2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Ann Arbor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.335, 0.366, 0.406, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6302", "image": {"bytes": "", "path": "./images/74cb088a-06d9-4a5e-9b00-61fd6b874e89_f99dcc87-41a8-46f2-9c49-357593e5e4e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find exercise events taking place in San Francisco between July 10-31.\nPrevious actions:\n[generic] next -> CLICK\n[generic] next -> CLICK\n[generic] next -> CLICK\n[button] 10 -> CLICK\n[button] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.327, 0.879, 0.425] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6303", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_50a2f5c1-64de-41e9-abd0-4451f762fcea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.146, 0.031, 0.227, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6304", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_947b3258-9cc5-40c9-8aec-b8e6041d3782.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: HOLLYWOOD\n[span] Hollywood -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.488, 0.447, 0.499, 0.457] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6305", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_ac23a9dc-a401-429c-a93f-dbbf04494cbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK\n[textbox] To -> TYPE: Clara\n[textbox] From -> TYPE: James\n[textbox] Message Line 1 -> TYPE: Happy Christmas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.293, 0.714, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6306", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_1e025e9b-cd9f-43a1-83c4-088b78703733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.432, 0.052, 0.475, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6307", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_326b47cd-cccd-456f-b004-592a3038e94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.541, 0.009, 0.553, 0.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6308", "image": {"bytes": "", "path": "./images/ab139e9d-eb99-47f7-8d0b-f93479fadadf_9b8e4f5e-44c2-4d7b-822a-2f50757cdf0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus using service number 5456165184.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.17, 0.25, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6309", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_6d984d16-dfbc-428c-b948-d82c0d1ca057.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.038, 0.434, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6310", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3d0769b3-8443-4f88-9b2a-25919abee6b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.5, 0.693, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6311", "image": {"bytes": "", "path": "./images/bba6dd60-babd-4d8d-9e8b-242b0ffc08d7_df6e386e-5d3b-41fa-9e31-180a841bf8e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guide for paying with commuter benefits.\nPrevious actions:\n[p] About -> HOVER\n[link] SpotHero for Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.6, 0.303, 0.737, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6312", "image": {"bytes": "", "path": "./images/c82bd6d6-6f08-4f1d-aeef-351c86694bbc_bb39f777-043d-4f12-9973-afd6bef8c9b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest movies for rent in the Redbox kiosk at any Winn Dixie in Daytona Beach.\nPrevious actions:\n[button] Navigate to Find a kiosk nearby -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.254, 0.702, 0.268] in the image\nAnd my action is Action: TYPE\nValue: Daytona"}]}, {"id": "mind2web_6313", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_385a6a91-88f4-4837-83e1-2f3c5b92b626.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[img] Chris Rock: Selective Outrage -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.34, 0.374, 0.5, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6314", "image": {"bytes": "", "path": "./images/2fc63250-ac9a-4cd4-a29d-6460f0652fa3_481aabef-7831-4470-967a-8926d70118fa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless keyboard and mouse combo under $100.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless keyboard mouse\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.445, 0.192, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6315", "image": {"bytes": "", "path": "./images/e8603513-2740-485e-adf9-86361dd015f4_f149f408-6377-466c-8b6a-f552605df2f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare FlightAware subscriptions and signup for Enterprise plan.\nPrevious actions:\n[span] Products -> CLICK\n[span] Premium Subscriptions -> CLICK\n[button] Compare Subscriptions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.511, 0.505, 0.583, 0.524] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6316", "image": {"bytes": "", "path": "./images/18a581b9-5021-40aa-835d-3c8110402df3_a544e04d-4b21-40f0-beb2-5da432e73791.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find adventure movies coming to theaters.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Coming soon to theaters -> CLICK\n[span] Genre -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.361, 0.43, 0.381] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6317", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_8365081d-8726-4dad-9a47-25429f6fb4c8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK\n[textbox] To -> TYPE: was\n[option] Washington, DC - Union Station (WAS) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.604, 0.11, 0.712, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6318", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_46c14367-c9aa-4663-aef8-8d3ebac75daa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.054, 0.39, 0.089] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6319", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_9945456e-05f3-4c9e-8ce8-65ca56ec133c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Parking -> CLICK\n[link] Find Parking Lots -> CLICK\n[button] Stations G\u2013K \uf107 -> CLICK\n[link] Gloucester -> CLICK\n[link] Plan a trip from this station -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.22, 0.335, 0.239] in the image\nAnd my action is Action: TYPE\nValue: NORTH PLYMOUTH"}]}, {"id": "mind2web_6320", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_95b4682d-c31a-4bcc-877d-e861c8f213ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[button] Search -> CLICK\n[button] Number of rooms and guests -> CLICK\n[button] Increment -> CLICK\n[button] Increment -> CLICK\n[div] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.654, 0.064, 0.754, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6321", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_694f27c3-ec85-4bb5-a08c-7650fcbbbaf9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Start Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[combobox] Start Time -> SELECT: 9:00 AM\n[combobox] End Time -> SELECT: 6:00 PM\n[button] Update Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.419, 0.397, 0.432] in the image\nAnd my action is Action: SELECT\nValue: Sort by Distance"}]}, {"id": "mind2web_6322", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_af09a4fc-7fe3-430f-9aad-50d6c1d8ce02.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: mumbai\n[span] Mumbai -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.573, 0.292, 0.583, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6323", "image": {"bytes": "", "path": "./images/619ba95b-5a27-4c53-841c-edc281aacf7a_3b9dcd1e-9bb4-4599-b812-5fda7a1bf251.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a dome tent to my shopping cart.\nPrevious actions:\n[link] Camping -> CLICK\n[menuitem] View All -> CLICK\n[div] Type -> CLICK\n[label] Dome -> CLICK\n[link] Bass Pro Shops Eclipse 2-Person 5x7 Dome Tent -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.704, 0.976, 0.736] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6324", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_11b8428e-580a-427b-945b-e9964306d187.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[button] Search -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 400\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.246, 0.792, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6325", "image": {"bytes": "", "path": "./images/3e9d6144-e92b-4fb0-b634-48bf3eb5090b_88070251-f05f-4d48-8365-9ae69c638083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Greenport to Oyster Bay Branch, starting with train and ending via bus with less than 1/2 mile walk.\nPrevious actions:\n[searchbox] From -> TYPE: Greenport\n[listitem] Greenport, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.196, 0.359, 0.215] in the image\nAnd my action is Action: TYPE\nValue: Oyster Bay"}]}, {"id": "mind2web_6326", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_088d7365-0d88-422a-b819-ff3660ebdf1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\n[link] New flame Icon -> CLICK\n[link] Womens -> CLICK\n[div] Product Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.435, 0.194, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6327", "image": {"bytes": "", "path": "./images/563ec938-b259-45b7-b0d3-6360b74e601d_cf9e9473-f15a-4e35-a52d-fc2f8f6a9d9d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check 2 of WrestleMania single day Parking Passes on April 1st in California near LA.\nPrevious actions:\n[div] Sports -> HOVER\n[link] WWE -> HOVER\n[link] Wrestlemania -> CLICK\n[link] Parking -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.374, 0.553, 0.396] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6328", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_720c5c98-aa5c-4e3a-b84b-4edf4fc74ac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK\n[link] Any length -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.201, 0.212, 0.21] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6329", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_75331416-79a1-49ba-9151-cb36c58c21e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.124, 0.238, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6330", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_cc84e6d9-c116-476e-8c9e-7bc04f3fe1ab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[link] Search for packages -> CLICK\n[link] Hawaii Vacations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.267, 0.286, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6331", "image": {"bytes": "", "path": "./images/39937001-8af2-4727-bb2a-6997981cf50d_4e4e4193-b03a-48de-a041-da1ba92837d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of tours available at the Coliseum with free cancellation.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.025, 0.131, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6332", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_0fb30cdf-7ea1-47af-ad9a-010175cc1fb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[gridcell] Thu Jun 01 2023 -> CLICK\n[gridcell] Fri Jun 30 2023 -> CLICK\n[button] Apply -> CLICK\n[circle] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.582, 0.125, 0.597] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6333", "image": {"bytes": "", "path": "./images/973bf55c-d1a0-41c7-9ec8-9f59b3dbfb58_531e7d43-cdb2-42b0-ad84-8f482edced43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Repost on my feed a rock playlist.\nPrevious actions:\n[searchbox] Search -> TYPE: rock\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.154, 0.199, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6334", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_98673272-fde8-4585-bcb4-8fb21d9ef497.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[button] Choose departure date -> CLICK\n[checkbox] 5 June 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[tab] Fastest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.742, 0.306, 0.916, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6335", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_a2229723-f483-4aad-a049-63b0de313d31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.184, 0.318, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6336", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_9cb7b962-2d18-47fb-926a-597470068e61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... -> TYPE: BIRMINGHAM\n[span] Birmingham -> CLICK\n[checkbox] Return to a different location -> CLICK\n[searchbox] Return Location (ZIP, City or Airport) (required) -> TYPE: MONTGOM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.298, 0.505, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6337", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_0eb19e0d-99bd-405c-ba39-76125661d09e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[img] Add -> CLICK\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.343, 0.245, 0.389, 0.261] in the image\nAnd my action is Action: SELECT\nValue: 8"}]}, {"id": "mind2web_6338", "image": {"bytes": "", "path": "./images/6f4e562e-91f9-401d-8730-af947985b821_bb5ecc35-cad8-4934-8fd3-8db479c6b832.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 3 most rated to watch tv shows or movies on Amazon Prime to watchlist.\nPrevious actions:\n[button] Filter -> CLICK\n[button] Highest Rated -> CLICK\n[button] View Results -> CLICK\n[span] Watchlist -> CLICK\n[span] Watchlist -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.373, 0.788, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6339", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_812eb1e7-0b27-48a2-b770-544d5bccbd76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.255, 0.238, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6340", "image": {"bytes": "", "path": "./images/a747bed0-0f45-413a-8f48-2c45795e4e3d_5ecad292-0fb1-4d8e-8963-715a5e924186.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give a like to the #1 track of the Real Time Top Chart\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.806, 0.004, 0.838, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6341", "image": {"bytes": "", "path": "./images/3b7cead3-475d-41a4-a018-db89c7ace632_363d6ca6-36b4-40cd-8116-3c77b4246f5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the purchase price for powerwalls to install in a 200sqr feet 2 story house in the address 7528 East Mechanic Ave.Fargo, ND 58102\nPrevious actions:\n[link] Powerwall -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.722, 0.243, 0.919, 0.261] in the image\nAnd my action is Action: TYPE\nValue: 7528 East Mechanic Ave. Fargo, ND 58102"}]}, {"id": "mind2web_6342", "image": {"bytes": "", "path": "./images/39fd14c2-3bdd-4ca6-8a3a-1a6e7374c8e6_2a99e22d-6bc8-48c6-b38b-d358a070a01a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a woman t-shirt in xl size from merchandise section, add 2 pieces to the cart and check out.\nPrevious actions:\n[link] Merchandise -> CLICK\n[span] Women's Last.fm Spiral Hex Tee -> CLICK\n[select] S -> SELECT: XL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.58, 0.332, 0.591, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6343", "image": {"bytes": "", "path": "./images/eab97f0c-38b3-4421-bff6-697b3267f23c_9d6b03f7-af9e-4339-9c0e-9b57b36796e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find carnival cruise options that include Alaska.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.741, 0.428, 0.871, 0.482] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6344", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_c73b04b6-058a-4c28-9cb2-ca6eb698b205.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[span] Special events -> CLICK\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.137, 0.318, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6345", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_9f40df58-a7fa-4181-b1a9-f08a0f0bd2eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.06, 0.705, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6346", "image": {"bytes": "", "path": "./images/09660a8d-b01d-4e93-9dd2-0b4af256bc80_828eeb3d-81d9-49a8-a848-523adbcf487e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change the language to UK English and the currency to Euros\nPrevious actions:\n[svg] -> CLICK\n[combobox] Language -> SELECT: \ud83c\uddec\ud83c\udde7 English (UK)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.551, 0.062, 0.565, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6347", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4c49fa1f-80b9-49fd-b1df-515931e10c8d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.297, 0.828, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6348", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_89a66828-0f63-4b8f-9090-933d55e222a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[textbox] $ -> TYPE: 5\n[textbox] $$$ -> TYPE: 10\n[button] close -> CLICK\n[button] Color -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.008, 0.988, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6349", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_9404fc4c-c485-4e47-af68-762a4e97965f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.365, 0.846, 0.412] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6350", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_9fe8d58a-4c1f-4bbb-8bc9-2c1e157f291a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[button] Next -> CLICK\n[textbox] Mileage -> TYPE: 155000\n[button] Next -> CLICK\n[span] Black -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.502, 0.284, 0.532] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6351", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_2e1e2b82-41a6-4bb9-af07-28fc1b8604d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[button] Search flights + cruise -> CLICK\n[label] October 08, 2023 -> CLICK\n[span] Nights -> CLICK\n[label] Miami -> CLICK\n[button] View details -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.732, 0.952, 0.766] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6352", "image": {"bytes": "", "path": "./images/1203a016-d541-4914-9cdb-f042ad0abcf5_2fed1405-e307-4548-b8db-160e3d6a3342.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Anime TV shows and sorty by Newest\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.18, 0.668, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6353", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_f0282d1a-fb09-404d-88ce-5a583a75a055.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: resident evil\n[span] resident evil -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.337, 0.375, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6354", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_c534f502-bbd7-495e-b75e-fa1d5e851def.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK\n[dt] Memory -> CLICK\n[span] Show -> CLICK\n[span] 16GB -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.705, 0.192, 0.719] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6355", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_f46e2703-87c4-4986-b7be-4975b7288aef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[path] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK\n[img] -> CLICK\n[span] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.309, 0.217, 0.309, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6356", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_15c9b3ca-89b6-401b-9d4c-beb382884a11.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.337, 0.379, 0.594, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6357", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_d8c4b3b3-80ff-4b99-b6c1-2591f2525750.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.111, 0.378, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6358", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_31f7682f-dbf9-40fa-8368-f25c2670dabe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[path] -> CLICK\n[button] sub 1 -> CLICK\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.176, 0.858, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6359", "image": {"bytes": "", "path": "./images/1d9c6a43-eeb3-40ec-8617-d09a68fb0fb3_398a1cab-4bfc-42ae-b8df-5bb1fabdb9cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me routes from NYP to WAS on May 15th.\nPrevious actions:\n[textbox] From -> CLICK\n[option] New York, NY - Moynihan Train Hall (NYP) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.333, 0.168, 0.554, 0.188] in the image\nAnd my action is Action: TYPE\nValue: was"}]}, {"id": "mind2web_6360", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_393218a4-be87-41c9-880d-9dff65eb1a23.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk\n[li] John F. 
Kennedy International Airport (JFK) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.059, 0.233, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6361", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_9a258ed2-27f1-43d8-96f2-b7dd1562bcea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\n[link] Home Services -> CLICK\n[textbox] Near -> TYPE: Texas City, Texas\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.152, 0.279, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6362", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_27418770-0fb2-4572-8950-c111ca546d72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.132, 0.785, 0.165] in the image\nAnd my action is Action: TYPE\nValue: madison square garden"}]}, {"id": "mind2web_6363", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_d73fbe6d-8222-4166-9484-330d448e6b15.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[li] July -> CLICK\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] 10+ Night Trips -> CLICK\n[button] Guided Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.708, 0.438, 0.772, 0.473] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6364", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_8ec95e8f-a20b-4ab2-be5b-78333b5b16fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[link] Plan a trip from this station -> CLICK\n[combobox] To\u00a0 -> TYPE: NORTH PLYMOUTH\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.296, 0.358, 0.323, 0.376] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6365", "image": {"bytes": "", "path": "./images/0f63c624-6097-473e-ad19-59bc139836d1_12c4f752-1759-4fe1-b011-6efe7006dcda.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for developer jobs in Dallas, Texas, and review details of the latest job then create an 8-day alert after signing in.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.478, 0.431, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6366", "image": {"bytes": "", "path": "./images/4e44e7b6-3d2a-4ca2-870a-edc9e9751d5d_59b106a8-1c6b-4d63-bf92-a82e063fc15a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest last minute cruise deal\nPrevious actions:\n[link] Last-Minute Deals -> CLICK\n[heading] Last-Minute Cruise Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.277, 0.079, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6367", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_131ab6e4-1cda-403e-892f-48975f9de2b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[heading] Category -> CLICK\n[input] -> CLICK\n[option] Tops -> CLICK\n[heading] Size -> CLICK\n[label] L -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.435, 0.397, 0.445] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6368", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_cc1747c1-6897-475a-a414-30da991bc3fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK\n[link] Deals -> CLICK\n[div] Add -> CLICK\n[span] Add -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.762, 0.969, 0.771] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6369", "image": {"bytes": "", "path": "./images/593830ff-fd2c-4479-abf8-8fddee2cdaea_decbda01-c8ad-439a-a719-9fae758733b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show brochure of Loss Damage Waiver Protection.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.021, 0.74, 0.031] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6370", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_0711a396-35a4-4cc4-b1a1-0264829f7b8b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[gridcell] 24 June 2023 -> CLICK\n[textbox] Passengers / Class -> CLICK\n[button] Decrease number of infant passengers -> CLICK\n[button] Increase number of child passengers -> CLICK\n[span] (Business/First) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.402, 0.927, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6371", "image": {"bytes": "", "path": "./images/895f32c1-7d6f-4474-b5b1-50daf5cb2a40_c2522b37-de29-4b8e-8f85-8cbe56475733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find jobs available in Texas for American Airlines.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.522, 0.431, 0.546] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6372", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_c2e9086d-05ae-454c-a286-99169b97287a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK\n[textbox] To -> TYPE: Washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] Depart Date -> CLICK\n[gridcell] Monday, April 17, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.824, 0.105, 0.957, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6373", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_3f9a5ae5-bcb9-4dda-aec6-2e5d2e3a0499.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK\n[link] 18 March 2023, Saturday -> CLICK\n[button] done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.127, 0.562, 0.173] in the image\nAnd my action is Action: TYPE\nValue: 2819"}]}, {"id": "mind2web_6374", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_3b61150e-f073-4093-b655-8b362b023c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.339, 0.012, 0.422, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6375", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_7eca4c5a-2094-4510-8f7c-b18976791000.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. -> CLICK\n[textbox] *Amount -> TYPE: 150\n[span] Marriott Deluxe Box -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.45, 0.873, 0.467] in the image\nAnd my action is Action: TYPE\nValue: Clara"}]}, {"id": "mind2web_6376", "image": {"bytes": "", "path": "./images/b5de73d0-820d-45bf-8989-1743a0d9b072_4c168a73-6f51-4f60-8121-76e76caa359d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the closest parking spot to the Orlando Magic at Brooklyn Nets event, happening in Barclays Center, to enter after 3 pm and exit after 5 pm\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.134, 0.785, 0.167] in the image\nAnd my action is Action: TYPE\nValue: barclays center"}]}, {"id": "mind2web_6377", "image": {"bytes": "", "path": "./images/2d92911a-b208-4d68-ad00-46d2d67f9efa_107e8ce0-5be5-4b2f-8966-35de535030bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a store in spring, Texas.\nPrevious actions:\n[link] Store Locator -> CLICK\n[textbox] Please enter City, State, or Zip Code -> TYPE: SPRING, TX"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.177, 0.517, 0.194] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6378", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_32423741-d475-4401-bb90-37b5783f2819.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK\n[button] Price -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.956, 0.014, 0.988, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6379", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_91233398-b0ea-424a-9cd5-2b60b4283b6b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK\n[button] To make this website accessible to screen reader, ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.116, 0.603, 0.305, 0.641] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6380", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_0d7efda5-9240-4c57-9a5c-5446e9c2d83c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.248, 0.292, 0.28] in the image\nAnd my action is Action: SELECT\nValue: Events"}]}, {"id": "mind2web_6381", "image": {"bytes": "", "path": "./images/95499427-980a-4115-b1aa-6b252b4fb2c3_bc4dbb7f-e800-41f5-9fb1-edeecfa090b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse cough medicine that is rated 4 stars and above and is $15-$20.\nPrevious actions:\n[combobox] Search products and services -> TYPE: cough medicine\n[button] Search for cough medicine -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.385, 0.143, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6382", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_56260a12-8133-43af-ba62-a8526f0e5aee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM\n[button] Select Pick-up Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.157, 0.428, 0.255, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6383", "image": {"bytes": "", "path": "./images/a31de393-c6e0-4175-858b-03cdc435d585_edb2c211-bef6-4991-a828-73831abc411d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse events happening at Madison Square Garden.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: Madison Square Garden"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.113, 0.748, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6384", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_00e19130-721a-425b-aa74-57bcbb23ded3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[button] Paris France -> CLICK\n[circle] -> CLICK\n[button] -> CLICK\n[div] Food Tours -> CLICK\n[label] Free Cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.919, 0.263, 0.963, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6385", "image": {"bytes": "", "path": "./images/06309af8-9ca0-41c4-8acc-1e4e5db2f344_59276a5b-5c7e-49bc-ba3e-07e7219dfcd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the most popular Women's Athletic Shoes.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.018, 0.092, 0.137, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6386", "image": {"bytes": "", "path": "./images/aeace1e7-0460-4f4e-99fb-2a8c867b97ef_da04e845-1f80-4464-80df-2a89df6c5d9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the highest rated Last-Minute Flights & Car Rentals\nPrevious actions:\n[link] Last-Minute Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.363, 0.656, 0.511] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6387", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_b87d39de-e0c4-41ce-9ed7-37de94e8fe7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.301, 0.612, 0.699, 0.642] in the image\nAnd my action is Action: TYPE\nValue: Doja Cat"}]}, {"id": "mind2web_6388", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_b491c36c-59cd-45e3-853d-c107a16e3373.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] Location Anywhere -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.69, 0.109, 0.787, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6389", "image": {"bytes": "", "path": "./images/7ce76343-dafe-4cf6-9bfd-918834d0c641_414ca573-101c-43e1-9a61-dea8ac4d6a54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated fast responding phone repair shop for data recovery in Houston.\nPrevious actions:\n[span] Phone Repair -> CLICK\n[textbox] Near -> TYPE: houston\n[span] Houston -> CLICK\n[button] Fast-responding -> CLICK\n[radio] Data recovery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.148, 0.63, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6390", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_ec68d5c3-9dd6-47e0-ae07-61673d79709f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: roman empire history\n[button] Go -> CLICK\n[link] Kindle eBooks -> CLICK\n[link] English -> CLICK\n[link] Last 90 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.934, 0.061, 0.968, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6391", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_9feb1093-8b40-4fa5-81f4-548ad83d5940.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS\n[generic] Close -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 31 -> CLICK\n[link] 7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.295, 0.567, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6392", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8d380612-6d85-4cf3-9691-a9ba0257e423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.269, 0.445, 0.358] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6393", "image": {"bytes": "", "path": "./images/cdb6b70d-6083-49b5-908c-a6c0f711c175_d602e0d8-9092-45a7-99e7-c0d26107ca55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces near Warner Theatre in Washington DC.\nPrevious actions:\n[link] CITIES -> CLICK\n[heading] WASHINGTON DC -> CLICK\n[link] Theaters -> CLICK\n[link] Warner Theatre Theaters -> CLICK\n[link] SEE OPTIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.246, 0.336, 0.378] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6394", "image": {"bytes": "", "path": "./images/c9740663-a6aa-4aee-919a-330a9f2b3091_bc897867-f54d-4a70-aeda-56cdfe8b25e6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated room for one adult in Los Angeles, CA between May 7 to May 14 which offers senior discount, free breakfast, is pet friendly and has a pool\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.119, 0.325, 0.136] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_6395", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_7648c204-6107-469b-915d-6b24608d0e96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NEW YORK\n[span] New York -> CLICK\n[combobox] Reservation type -> SELECT: Events\n[combobox] Event date -> SELECT: Next month\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.095, 0.452, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6396", "image": {"bytes": "", "path": "./images/5418beec-bc06-4e11-9664-4038fbdeefe9_575c9828-b577-4fd4-bc2c-656a78fe0d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List Batman collectible figures priced under 10 dollars and a customer rating above 4 with a same-day delivery option.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: BATMAN\n[span] BATMAN -> CLICK\n[link] Filter -> CLICK\n[checkbox] Same Day Delivery Same Day Delivery -> CLICK\n[generic] Refine by Category: Collectibles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.139, 0.375, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6397", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_0c6371f1-0488-46a8-9683-b6d73833cee9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[heading] 123 Main St -> CLICK\n[button] pickup date input field -> CLICK\n[button] 29, March 2023 -> CLICK\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.207, 0.82, 0.239] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_6398", "image": {"bytes": "", "path": "./images/14f0e837-af77-44b9-9cad-a8911aab30c6_451803f9-762c-48e4-a1a6-71d06f5d9431.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the status of flight from Columbus, number 1234 on April 5th, 2023.\nPrevious actions:\n[heading] Flight status -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.335, 0.272, 0.37] in the image\nAnd my action is Action: TYPE\nValue: 1234"}]}, {"id": "mind2web_6399", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_4ed5a644-3e8b-4966-8913-bb0e0c5b63ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.774, 0.582, 0.901, 0.595] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6400", "image": {"bytes": "", "path": "./images/05e1f2bd-3498-4fcb-8d43-37c903397ded_aa7a9996-a657-4023-97a9-d9baa3b5462c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the balance for a gift card with the number 1000000000000000 and pin 1222\nPrevious actions:\n[link] Visit the Gift Cards page -> CLICK\n[link] Check Balance -> CLICK\n[spinbutton] Gift Card Number -> TYPE: 1000000000000000\n[spinbutton] Gift Card Pin -> TYPE: 1222"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.411, 0.319, 0.5, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6401", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_76fc3614-10f5-43b1-a822-b3a3289e4a98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Kitchen -> CLICK\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.313, 0.146, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6402", "image": {"bytes": "", "path": "./images/277a776a-b335-4761-a543-bed1c6eca1d6_d675f8fc-0d5d-4d60-8f8e-9da0ea5b0edc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for kayaks and filter the results to show only those that are suitable for whitewater rapids.\nPrevious actions:\n[searchbox] Search -> CLICK\n[searchbox] Search -> TYPE: Kayaks\n[option] kayaks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.232, 0.164, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6403", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_3b2f20d2-4e98-433d-b1cc-4c6495958de0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK\n[link] Try Somewhere New -> CLICK\n[link] Destinations -> CLICK\n[label] Armenia -> CLICK\n[label] Austria -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.202, 0.086, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6404", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_b4a42ac9-e109-4952-8e2c-206e39e788e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[div] -> CLICK\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.797, 0.916, 0.832] in the image\nAnd my action is Action: TYPE\nValue: denise.bloom@bbt.com"}]}, {"id": "mind2web_6405", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_d4ddbbb9-4b1c-4cd9-b4b9-e938ed17e04e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[svg] -> CLICK\n[svg] -> CLICK\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.562, 0.586, 0.584] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6406", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_57fc6fee-25ac-4ac0-9074-5578b7fb359d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk\n[button] New York (JFK - John F. Kennedy Intl.) New York, U... 
-> CLICK\n[button] Going to -> CLICK\n[textbox] Going to -> TYPE: heathrow"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.147, 0.652, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6407", "image": {"bytes": "", "path": "./images/5b85b4e5-60cb-42e6-af73-a6a45f8fc49d_428e6ce9-bdfd-4278-8ac1-7e2a35aa7d0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets as gifts and book 2 tickets for stand-up comedian Dave Chappelle and the roots shows.\nPrevious actions:\n[link] Tickets as Gifts \u2013 Best Tickets to Buy Online -> CLICK\n[link] Dave Chapelle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.493, 0.941, 0.52] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6408", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_e6a4b3ef-f0b3-4aed-9c52-7e8ad1b03356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK\n[button] Select My Car -> CLICK\n[link] All Vehicles (13) -> CLICK\n[span] SUVs & Wagons (5) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.793, 0.474, 0.918, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6409", "image": {"bytes": "", "path": "./images/117b1d5c-1e54-4588-ba84-aa173887b067_cca8b945-855b-47c1-82fb-2ccaf2794176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Renew a existing KOA rewards account with the rewards number 1000000001 e postal code 10023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.013, 0.799, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6410", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_55f7b393-c44a-43dd-924a-37bbcb3e2b07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[combobox] select-filter -> SELECT: 50 mi\n[span] Silver -> CLICK\n[p] Black -> CLICK\n[p] Lexus -> CLICK\n[p] Backup Camera -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.32, 0.429, 0.355] in the image\nAnd my action is Action: SELECT\nValue: Newest first (by car year)"}]}, {"id": "mind2web_6411", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_296e4fb3-13b3-4223-ae21-3bb06155dd9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[tab] Lowest price first -> CLICK\n[checkbox] list-filter-item-label-0 -> CLICK\n[checkbox] list-filter-item-label-1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.509, 0.089, 0.517] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6412", "image": {"bytes": "", "path": "./images/83ada6a6-e264-4d98-9d48-7f97f87bab5d_b0b16702-6153-482c-b402-5cd4ff52a76b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the photo gallery of Acadia National Park.\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Photos & Multimedia -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.087, 0.641, 0.147, 0.652] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6413", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_b692968d-a907-4613-89eb-1760e9529b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107\n[textbox] Zip Code -> TYPE: 49107\n[div] 49107 - Buchanan, MI -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.193, 0.45, 0.475, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6414", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_74a19b41-3a33-4bab-b089-69728a1ad3bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[button] Flavor -> CLICK\n[div] -> CLICK\n[button] close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.349, 0.57, 0.39] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6415", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_2129d050-f557-464e-a1c4-932650bbc1a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.243, 0.146, 0.25] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6416", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_4b7d1d2a-fd6c-4c7c-b09e-31e4ead7df5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[div] Make/Model -> CLICK\n[combobox] Year -> SELECT: 2016\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Camry\n[textbox] Mileage -> TYPE: 40000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.206, 0.71, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6417", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_c155e0d8-1093-4266-8b0c-bc68546903ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz\n[input] -> TYPE: debbi.wo@bbt.com\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.823, 0.953, 0.853] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6418", "image": {"bytes": "", "path": "./images/51b75f3d-b293-495d-a5a0-8ed5cc8270a2_3d8b49b4-942a-45c5-a376-69f56192a34e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a reservation for 2 guests on april 21 at noon in a restaurant in Austin that is new on resy\nPrevious actions:\n[path] -> CLICK\n[button] Austin -> CLICK\n[heading] New On Resy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.206, 0.08, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6419", "image": {"bytes": "", "path": "./images/5f9182dc-d35d-4c0e-9abe-cd913c136528_f173443e-4eb1-4ae7-a454-247a9d439f6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find career openings in the marketing department\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.217, 0.677, 0.255, 0.692] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6420", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_c3453d54-f335-4c14-ba7a-4675249cedc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[button] Thai -> CLICK\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.077, 0.63, 0.085] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6421", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_f3653021-d6dd-40c8-a6af-e8b82e9cb356.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60\n[button] Submit price range -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.75, 0.027, 0.761] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6422", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_d6a1eadd-6cae-44c4-850a-a5c685fc157f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Flight status -> CLICK\n[generic] Apr 7, 2023 -> CLICK\n[option] Apr 9, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.161, 0.637, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6423", "image": {"bytes": "", "path": "./images/cd4df5fa-2c90-4a76-b4b0-0ca7bcdac2ee_99d9fbae-2e31-4c14-b028-651a7fe28b17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a train ticket with seat from Berlin to Zurich on March 31 after 8 am in aisle quite zone in side by side seat formation.\nPrevious actions:\n[span] Berlin -> CLICK\n[combobox] Arrival station, none selected. Select a station. 
-> TYPE: ZURICH\n[span] Z\u00fcrich -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 31 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.268, 0.133, 0.291] in the image\nAnd my action is Action: SELECT\nValue: 08"}]}, {"id": "mind2web_6424", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_8b4597c5-e6f7-4480-b226-acb1effa91d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[tab] DEALS -> CLICK\n[link] Flights to popular destinations -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.105, 0.331, 0.263, 0.351] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6425", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_f15001c6-c158-486a-8987-66186ce22fab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Jackets -> CLICK\n[div] Sports -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.325, 0.194, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6426", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_981565cd-b59b-4909-b094-0e73023b641d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> CLICK\n[combobox] Departure station, none selected. Select a station... -> TYPE: Manchester"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.128, 0.326, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6427", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_45a80841-1cc0-465b-9537-9b8b10dae0d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK\n[checkbox] 4+ doors -> CLICK\n[spinbutton] Maximum price -> TYPE: 1000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.615, 0.563, 0.716, 0.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6428", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_6b855ba0-f2bd-493a-bc6f-9a7379dfbd8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.108, 0.789, 0.128] in the image\nAnd my action is Action: TYPE\nValue: Alagnak"}]}, {"id": "mind2web_6429", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_465ea3dd-835f-4dba-b0fb-7d1092c13c1c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[link] Explore Destinations & Travel Requirements -> CLICK\n[combobox] Origin -> CLICK\n[combobox] Origin -> TYPE: New York\n[p] New York City -> CLICK\n[combobox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.608, 0.374, 0.656] in the image\nAnd my action is Action: TYPE\nValue: Tokyo"}]}, {"id": "mind2web_6430", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_3d3c3e48-24f0-4760-b98f-803f6a4dbe61.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[span] Sheffield -> CLICK\n[textbox] Date use format: 24-Mar-23 -> CLICK\n[link] 29 -> CLICK\n[listbox] Leaving at or Arrive by selector -> SELECT: Leaving at\n[listbox] hour -> SELECT: 10"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.21, 0.194, 0.228] in the image\nAnd my action is Action: SELECT\nValue: 45"}]}, {"id": "mind2web_6431", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_441ca13c-8adc-428d-b2ff-025df829b1b3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> TYPE: adele\n[option] Adele -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.387, 0.941, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6432", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_b5cf3337-8a5b-42c9-b0a3-2d56740dd044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.106, 0.713, 0.132] in the image\nAnd my action is Action: TYPE\nValue: street taco"}]}, {"id": "mind2web_6433", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_b542e191-6e7d-40fd-bc21-5c9cf5e57afa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.208, 0.463, 0.215] in the image\nAnd my action is Action: TYPE\nValue: India"}]}, {"id": "mind2web_6434", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f547410c-3931-49b3-8113-614e741e6ad6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.061, 0.274, 0.47, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6435", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_d7a941ee-56f7-4e42-8143-8a9ef38682bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: SPRINGFIELD\n[button] Springfield, IL, US (SPI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.194, 0.481, 0.221] in the image\nAnd my action is Action: TYPE\nValue: AUSTIN"}]}, {"id": "mind2web_6436", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_8e540eed-1de3-4c82-8db8-76b4c92dbf45.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\n[button] load Health Services Menu -> CLICK\n[link] Skin, Hair & Nails -> CLICK\n[button] Hair Loss Evaluation & Treatment -> CLICK\n[link] Hair Loss Evaluation & Treatment -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10018"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.421, 0.514, 0.454] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6437", "image": {"bytes": "", "path": "./images/1267fbd8-54c8-4f2e-8471-b13c4de332c7_449e6dc9-7f9a-408c-bed6-3d20020ddddf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a tiny home anywhere for an adult and 2 pets to stay in from april 10 to april 12\nPrevious actions:\n[path] -> CLICK\n[span] Tiny homes -> CLICK\n[button] Check in / Check out Any week -> CLICK\n[button] 10, Monday, April 2023. Available. Select as check... -> CLICK\n[button] 12, Wednesday, April 2023. Available. Select as ch... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.621, 0.102, 0.737, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6438", "image": {"bytes": "", "path": "./images/eb9995b5-261b-4659-bebc-951e0f855d75_41d423cb-c3e5-4dae-92e0-2e6fd5ce03d8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the availability of a Nintendo Switch gaming console at the nearest Target store.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: Nintendo Switch\n[link] nintendo switch -> CLICK\n[img] Nintendo Switch with Neon Blue and Neon Red Joy-Co... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.206, 0.769, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6439", "image": {"bytes": "", "path": "./images/df3b7cd4-526e-453b-9114-c0db1b3ed5ea_6fcc8f6e-834b-491d-ae80-20b826c834a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Dining Room Sets under furniture and filter the results to show only items availble under the Buy It Now format.\nPrevious actions:\n[link] Home & Garden -> CLICK\n[button] Furniture -> CLICK\n[link] Dining Sets -> CLICK\n[button] Buying Format -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.83, 0.344, 0.971, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6440", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_d5d020f2-aeae-4c90-9b5f-0b0183babe5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.323, 0.083, 0.333] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6441", "image": {"bytes": "", "path": "./images/75db63ac-4fcf-400e-833f-d31f00cf6ab6_1817cbf8-9fa6-4bba-9c16-d9485c6a8b6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearby top-rated restaurants in Chicago, save the restaurant, and then book a table for 7 people on April 20 for lunch at 2 pm.\nPrevious actions:\n[link] Nearby Restaurants -> CLICK\n[div] Toronto -> CLICK\n[button] Chicago -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.232, 0.048, 0.241] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6442", "image": {"bytes": "", "path": "./images/f57e6c0a-8f8b-4756-9f1d-1bdea7a0af5c_775aaaea-a625-4f60-aaea-007d6535c143.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a pack of toilet paper and a bottle of laundry detergent to your Amazon cart with the lowest total price.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: toilet paper\n[button] Go -> CLICK\n[span] Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.785, 0.065, 0.917, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6443", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_d26c73be-ce7d-42a2-8980-4bb23f15a0ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... 
-> CLICK\n[button] ADD TO CART \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.769, 0.254, 0.852, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6444", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_a42afdd3-8e38-4ae9-bc0b-ddd2a3d058e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.401, 0.084, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6445", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_6f9d9303-c179-4500-90b0-311631d41991.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr7 to Apr 8.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.228, 0.345, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6446", "image": {"bytes": "", "path": "./images/6a9a2e52-24b4-46c3-9e52-0f79c15ae3d0_4944ca15-6133-4d09-8a1c-cf0e040131c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a full-time job in Budget USA in finance in any location, and apply to the latest job.\nPrevious actions:\n[link] Careers -> CLICK\n[link] Openings -> CLICK\n[button] Country -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.393, 0.307, 0.406] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6447", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_1bf154c8-15bb-47d8-98fd-60b02921b167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.418, 0.607, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6448", "image": {"bytes": "", "path": "./images/2bc47bba-32d7-406b-bb39-c6f2b2f2039a_1cf49d43-a70f-4b13-aeeb-fe28f507be53.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite the top rock track\nPrevious actions:\n[link] Music -> CLICK\n[link] rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.11, 0.176, 0.135, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6449", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_a8d5de92-8fd0-4c76-abb2-99501c4f2e36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[link] Time: newly listed -> CLICK\n[button] Condition -> CLICK\n[link] Used -> CLICK\n[button] Style -> CLICK\n[link] French -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.53, 0.244, 0.603, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6450", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_a730b544-051d-4ef2-a3d2-cbe725ac4ee0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[textbox] To -> TYPE: washington\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[div] Depart Date -> CLICK\n[button] Next month -> CLICK\n[gridcell] Thursday, June 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.307, 0.805, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6451", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3b5e3d83-6a1f-443a-b5cd-0946e3dbc507.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. 
-> TYPE: London\n[a] LON - London, United Kingdom -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.381, 0.495, 0.62, 0.521] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6452", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_2af9e053-5c3c-4c50-bf2b-199258df6d98.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] click here -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: Ohare, Chicago\n[span] , United States -> CLICK\n[span] , Chicago -> CLICK\n[link] Make a Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.337, 0.567, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6453", "image": {"bytes": "", "path": "./images/49c60777-2500-4cea-8200-a95d3be2c9a1_56ff70da-d235-48f9-875f-9f3a17423d58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental for an economy car in Houston from Mar 10 to Mar 13.\nPrevious actions:\n[button] Ellipsis Icon -> CLICK\n[link] Car rentals -> CLICK\n[textbox] Pick up -> TYPE: Houston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.137, 0.255, 0.463, 0.274] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6454", "image": {"bytes": "", "path": "./images/93f3a9a1-2fd8-4457-b689-d70b3249b99b_afe77c2d-c101-407d-8a8e-f73b0bfa6588.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status with flight number 2819 on 18th of March.\nPrevious actions:\n[tab] FLIGHT STATUS -> CLICK\n[button] \ue908 Search by date required selected as 15 March 202... -> CLICK\n[link] 18 March 2023, Saturday -> CLICK\n[button] done -> CLICK\n[spinbutton] Flight Number (Required) -> TYPE: 2819"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.894, 0.103, 0.934, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6455", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_7b60bb01-31dd-49a7-b2a1-b0f9ed18651f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes three albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[img] Madonna -> CLICK\n[link] Like A Virgin -> CLICK\n[button] Add to List -> CLICK\n[combobox] List -> SELECT: New\n[button] Save -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.46, 0.01, 0.469, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6456", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_3ae4ddd5-b5f1-47cf-bcaf-9e139114ca80.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.24, 0.0, 0.293, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6457", "image": {"bytes": "", "path": "./images/a31de393-c6e0-4175-858b-03cdc435d585_4f58be7e-a7f8-4d07-b40c-649e97d4ab84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse events happening at Madison Square Garden.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.219, 0.754, 0.235] in the image\nAnd my action is Action: TYPE\nValue: Madison Square Garden"}]}, {"id": "mind2web_6458", "image": {"bytes": "", "path": "./images/3a85b415-9e68-4cf0-91be-386d4d8f0710_705d55f8-4fbb-4b6a-8f06-cf33aef62a05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: download pdf for list of accessible station in NYC.\nPrevious actions:\n[span] Menu -> CLICK\n[link] Accessibility -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.521, 0.859, 0.542] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6459", "image": {"bytes": "", "path": "./images/da800367-eeab-478b-bcf2-e6d4106591d2_077ba195-2e04-43b3-afce-0b47b400e479.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events in Lubbock, Texas during next 7 days\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.218, 0.782, 0.26] in the image\nAnd my action is Action: TYPE\nValue: Lubbock, Texas"}]}, {"id": "mind2web_6460", "image": {"bytes": "", "path": "./images/d516b2f7-b180-422f-9e24-6fb778cb4b55_e909e452-ca74-4e47-8a19-0bae9174a58e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me Ed Sheeran Chords & Tabs\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.05, 0.838, 0.076] in the image\nAnd my action is Action: TYPE\nValue: Ed Sheeran"}]}, {"id": "mind2web_6461", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_d2c8159b-e150-4b43-8385-f0fc12d07bf3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[span] Youth -> CLICK\n[combobox] Select passenger age -> SELECT: 16\n[button] Done -> CLICK\n[listbox] hour -> SELECT: 12\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.212, 0.327, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6462", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_e206ee61-e177-44d4-9979-26f39dc2239a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.811, 0.035, 0.919, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6463", "image": {"bytes": "", "path": "./images/8eae88ef-9641-43c6-be6d-f8abc96d99fa_29aa4ba4-c9ce-417a-9ffd-24d73d1dec89.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive Sony controller compatible with PS5 with free shipping and add it to my cart.\nPrevious actions:\n[link] \ue92d Gaming & VR \uf105 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.434, 0.128, 0.63, 0.149] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6464", "image": {"bytes": "", "path": "./images/91f56f3b-e3de-4d57-95df-f976a11c64f7_c6dcefac-fb03-4657-86c0-8738db10dd14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a veterinarian emergency service for dogs in Hawaii with a BBB rating of A+\nPrevious actions:\n[link] Hawaii, HI -> CLICK\n[button] Find -> CLICK\n[link] All -> CLICK\n[label] Veterinarians -> CLICK\n[label] Veterinarian Emergency Services -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.329, 0.204, 0.426, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6465", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_997cc562-d56c-4861-bad7-1022dcced9d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.236, 0.346, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6466", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_6f701fb9-97ae-44d8-8687-8b254b1ffb58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.313, 0.969, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6467", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_35e91393-85cf-48da-a8ab-49d8e51a5972.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.183, 0.222, 0.202] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6468", "image": {"bytes": "", "path": "./images/122af0dc-9e4e-4c5a-98ad-5c4d02f32284_8ceb9903-e4ac-4aed-bd80-57d318467160.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the next available bus from Chicago to Ann Arbor.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Chicago\n[span] Chicago, IL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.172, 0.568, 0.194] in the image\nAnd my action is Action: TYPE\nValue: Ann Arbor"}]}, {"id": "mind2web_6469", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_c0c75b87-87d9-4bf8-b4c8-62bd4f5cd482.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[link] restaurants. -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.184, 0.261, 0.716, 0.286] in the image\nAnd my action is Action: TYPE\nValue: La Bergamote"}]}, {"id": "mind2web_6470", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_f9a35bff-eddb-43f7-b14b-e3749487b47f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[span] Sat 8 Apr - Sat 15 Apr -> CLICK\n[checkbox] 2 May 2023 -> CLICK\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.186, 0.246, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6471", "image": {"bytes": "", "path": "./images/53b4ca73-c05b-4609-98c8-a0a62072bdb5_18bf58f3-ebb4-42d1-b1e3-ef8ba7e28ea5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add cheapest vitamin C and D to cart.\nPrevious actions:\n[img] -> CLICK\n[link] Vitamins A-Z -> CLICK\n[img] Vitamin D -> CLICK\n[div] Relevance -> CLICK\n[div] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.219, 0.263, 0.287, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6472", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_9b96a9ad-7b49-4f3c-90ed-d242d5015b41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[button] Select My Car -> CLICK\n[link] Recommended -> CLICK\n[link] Price (Low to High) -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $16.99/Day$6.55/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.654, 0.777, 0.677] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6473", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_5ff7d430-6fc0-4de3-9a6d-d9eb5dae3fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH\n[button] find store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.208, 0.586, 0.231] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6474", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_dd6de110-8d90-4416-9a1b-0987e282bec0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.007, 0.1, 0.018] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6475", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_f51fd4e2-ed05-4127-9077-0f925bf4755a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK\n[button] Going to -> TYPE: New Delhi\n[button] New Delhi Delhi, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.77, 0.204, 0.95, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6476", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_4fa8f954-d625-412d-9832-bf3a4b9c1388.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK\n[searchbox] SEARCH BY KEYWORD -> TYPE: Chicago\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.257, 0.977, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6477", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_cdacd995-13b3-4369-94a3-ae13afd2727b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.109, 0.83, 0.116] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6478", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_04a916be-3c46-4417-917f-c2ebb4477795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\n[button] Check Availability -> CLICK\n[div] 27 -> CLICK\n[button] increase number -> CLICK\n[button] Apply -> CLICK\n[button] 10:30 AM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.598, 0.962, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6479", "image": {"bytes": "", "path": "./images/8308d10f-3904-473a-a186-c6b8f939f018_334334b1-8249-4f2b-8bbe-957ca969ed1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest private sightseeing trip of India in the month of June\nPrevious actions:\n[button] India Asia -> CLICK\n[button] Next -> CLICK\n[path] -> CLICK\n[gridcell] Thu Jun 01 2023 -> CLICK\n[gridcell] Fri Jun 30 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.138, 0.78, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6480", "image": {"bytes": "", "path": "./images/0a2623c8-ce74-4572-9eb6-89d0b216d0cf_431d589f-9030-4d59-8246-23b942dbc896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Follow Taylor Swift and add a playlist to next up.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.005, 0.561, 0.019] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6481", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d91f9ef8-1eb0-4b4e-97f4-53ffdd24f253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK\n[link] Added any time -> CLICK\n[link] Past year -> CLICK\n[link] Any length -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.26, 0.212, 0.271] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6482", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_b34f0027-2cd9-4f3d-9ef6-3590b99cc795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> CLICK\n[span] Enter an Address or Venue to find parking -> TYPE: stewart hotel\n[option] STEWART HOTEL\u00a0\u00a0148 W 31st Street, New York Parking... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.658, 0.286, 0.697] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6483", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_c0f7e9d2-0b58-43a7-bdb5-3aab72d5ffb7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[searchbox] Search Site -> CLICK\n[searchbox] Search Site -> TYPE: Western Digital internal SSD 1 TB\n[button] \uf002 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.248, 0.198, 0.451, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6484", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_fc043e8d-37d3-44a0-a1fd-fc04dd4d87de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[span] Vacation packages -> CLICK\n[textbox] From -> TYPE: san francisco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.235, 0.385, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6485", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_06264c9a-c4ab-4c01-ad10-8b7cd5d82367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.076, 0.203, 0.359, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6486", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_4dfdc265-d3cd-47da-8fe1-7808d1596608.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[generic] Tuesday April 4th -> CLICK\n[div] 7 -> CLICK\n[button] Tuesday April 11, 2023 -> CLICK\n[svg] -> CLICK\n[checkbox] Free internet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.66, 0.249, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6487", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_2c62f1f6-f57c-482b-9321-5cf44af07e07.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: 90028\n[span] 90028 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.396, 0.269, 0.459, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6488", "image": {"bytes": "", "path": "./images/f355cfcf-a3f7-47e4-b0be-b1c595e0954e_70e1077e-985b-4404-8a85-fa82c80db258.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me current deals for California.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.26, 0.033, 0.309, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6489", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_5475af7a-bbf1-45fc-8a4e-0cc96327858b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.211, 0.639, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6490", "image": {"bytes": "", "path": "./images/3e142eee-7a62-4ad7-ae16-419d596ab63b_fddf53b9-c162-4b31-9ab7-90a60f30363f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the list of injured NBA players.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.047, 0.335, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6491", "image": {"bytes": "", "path": "./images/bf469f30-6628-4017-b963-672645d7feab_57746056-f9a1-4dee-a17d-3ce65f01e4b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest rated dog collar under 10 dollar.\nPrevious actions:\n[link] Dog (1,338) -> CLICK\n[span] Category -> CLICK\n[link] Collars & Leashes (485) -> CLICK\n[link] Sort by: Featured -> CLICK\n[link] Highest Rated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.565, 0.168, 0.582] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6492", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_7daaafa4-1726-4b9a-895e-79ec1c80b455.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla\n[textbox] Zip -> TYPE: 10019"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.601, 0.148, 0.748, 0.168] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6493", "image": {"bytes": "", "path": "./images/265cd715-0607-4ebe-8420-046b1a165239_49c4fcaf-64f4-4bbe-8357-c31d97aa56ad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are Jerry Trainor's upcoming projects?\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Jerry Trainor"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.652, 0.009, 0.671, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6494", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_c4edf30a-cfa0-4d58-b4cf-a0df18c146c5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[polyline] -> CLICK\n[link] Cars for sale near Tampa, FL -> CLICK\n[checkbox] Third-Party Certified (343)\uf05a -> CLICK\n[span] Drive Type -> CLICK\n[checkbox] Front Wheel Drive (147) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.342, 0.296, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6495", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_69f3a5c7-082c-4b11-a016-a1138abc3d8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: Jk rowling\n[link] Jk rowling -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.684, 0.186, 0.834, 0.206] in the image\nAnd my action is Action: SELECT\nValue: Publication date, new to old"}]}, {"id": "mind2web_6496", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_1a7f0d8f-b5fa-4866-b871-59de5b9c1c1e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.59, 0.329, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6497", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_4344eb2f-250c-4aba-b34e-a5555f30c841.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[textbox] Search for parking -> TYPE: busch stadium\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (10) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.658, 0.458, 0.695] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6498", "image": {"bytes": "", "path": "./images/ff6ff6e5-e73c-4ba2-809e-59a1e269d68f_a1b66ea7-5509-4164-b0a8-e7591a52b9b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Hair Loss Evaluation and Treatment in 10018 and show directions.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.021, 0.265, 0.041] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6499", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_b64c2417-c44e-46c4-bb0b-ff1775e7da29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\n[heading] CAR -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.237, 0.266, 0.273] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn Central"}]}, {"id": "mind2web_6500", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_20b9f688-742f-4a8f-8955-04d57f566697.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK\n[gridcell] 14 April 2023 -> CLICK\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.28, 0.94, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6501", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_deaf5409-9171-444b-af73-c5f6b73aec49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\n[LabelText] Buy Used -> CLICK\n[combobox] Select Make -> SELECT: Toyota\n[combobox] Select Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.521, 0.19, 0.599, 0.211] in the image\nAnd my action is Action: TYPE\nValue: 10019"}]}, {"id": "mind2web_6502", "image": {"bytes": "", "path": "./images/57f72023-3633-4c97-93f6-af12fe2edf4f_b84e4315-a4fa-4c98-85d7-362aa485addc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a south african history podcast with length between 10 to 30 minutes and filter it by audiobook tag.\nPrevious actions:\n[searchbox] Search -> TYPE: SOUTH AFRICAN HISTORY PODCAST\n[button] Search -> CLICK\n[link] Tracks -> CLICK\n[link] Any length -> CLICK\n[link] 10-30 min -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.347, 0.108, 0.357] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6503", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_af8d6e2e-cb67-4ba2-b95d-734aeb121700.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[link] Pants -> CLICK\n[gridcell] Category -> CLICK\n[input] -> CLICK\n[option] Sweatpants -> CLICK\n[heading] Sweatpants -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.778, 0.184, 0.806, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6504", "image": {"bytes": "", "path": "./images/d070774f-9ca2-43c0-a7d0-221697791cf0_363d203f-3721-4830-bd17-b3ba4819cdb4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a place with a good swimming pool that charges between $200 and $300 per night.\nPrevious actions:\n[img] -> CLICK\n[span] Filters -> CLICK\n[textbox] min price $ -> TYPE: 200\n[textbox] max price $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.659, 0.478, 0.786, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6505", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_e1d49b1d-8ccb-40dc-b6ed-08c19adbb66b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: Brooklyn\n[option] Brooklyn, OH, US Select -> CLICK\n[combobox] Renter Age -> SELECT: 18"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.341, 0.837, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6506", "image": {"bytes": "", "path": "./images/cbfa5c92-41cb-4481-97b2-9fb41298be13_9a06fdfb-25fa-4319-903f-ca492483c9fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wireless bra in size xxs or s, add one brown and one purple color to the cart and view the total cart value.\nPrevious actions:\n[input] -> CLICK\n[option] Relaxed Wireless -> CLICK\n[heading] Size -> CLICK\n[label] XXS -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.212, 0.256, 0.392] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6507", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5e1e5cf7-2414-4425-a730-3a1d08d2897a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[span] -> CLICK\n[textbox] To , required. -> CLICK\n[textbox] To , required. -> TYPE: Heathrow\n[a] LHR - London Heathrow, United Kingdom -> CLICK\n[textbox] Depart , required. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.28, 0.693, 0.31] in the image\nAnd my action is Action: TYPE\nValue: 04/22/2023"}]}, {"id": "mind2web_6508", "image": {"bytes": "", "path": "./images/b73503a7-e4e1-43f7-bfc0-866e7003615f_ffd68484-58e9-46e4-9d94-e0aa3514e9fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a taxi from O'hare Airport to 123 Main St, West Chicago, IL on Mar 29, 2023 at 12 noon for 2 adults.\nPrevious actions:\n[button] pickup time input field -> CLICK\n[button] Confirm -> CLICK\n[combobox] Passengers -> SELECT: 2\n[button] Search -> CLICK\n[button] Choose a standard-size taxi costing US$126.84 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.855, 0.53, 0.988, 0.585] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6509", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_9edd8d21-46ba-4b5e-a9c2-d5677b6f439f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] Select a trip start date -> CLICK\n[gridcell] Wednesday, April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.266, 0.795, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6510", "image": {"bytes": "", "path": "./images/eebf3f55-1a4d-4cdf-81c1-2fa3002bed6d_6017cb86-e365-4f2e-ae94-89c66c382a9b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Keen branded new pair of women's hiking shoes in the color brown, number 9, available in Columbus, Ohio store, average rating must be above 4.\nPrevious actions:\n[link] Footwear -> CLICK\n[menuitem] Hiking Boots -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.502, 0.068, 0.51] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6511", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_e2eb86ff-e660-46d6-a5d8-7109a895d213.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] Leaving from -> CLICK\n[textbox] Leaving from -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.136, 0.362, 0.16] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6512", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_134500d5-c3c2-4f6e-b266-64e10e38b77a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK\n[button] Annual Pass Internet Order Questions -> CLICK\n[button] Annual Pass Use -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.803, 0.95, 0.837] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6513", "image": {"bytes": "", "path": "./images/86ea50f5-1310-456e-97bf-799d8eb1896b_3a8e2481-e070-4ea5-8ff9-d87a03299985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the NFL Passing yards players stats.\nPrevious actions:\n[link] National Football League NFL -> CLICK\n[button] Open More Dropdown -> CLICK\n[heading] STATS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.11, 0.693, 0.18] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6514", "image": {"bytes": "", "path": "./images/196cde81-b001-47ec-b7c3-a77869e36deb_9712bbdc-3c5d-417f-b2d8-d9532b8fd75f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check bookstores available in Chelsea area\nPrevious actions:\n[link] shopping. -> CLICK\n[li] Neighborhood -> CLICK\n[link] Chelsea -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.111, 0.206, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6515", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_14d7f372-72b4-45e3-9082-e6915b5bce86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[searchbox] Search -> TYPE: gobites uno spork\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.225, 0.473, 0.382] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6516", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_a9db226a-fd14-429d-9f96-905cb66d254e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[span] 20 -> CLICK\n[button] Done -> CLICK\n[button] 1 Room, 1 Guest\ue932 -> CLICK\n[button] Add Rooms -> CLICK\n[button] Add Adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.316, 0.13, 0.341, 0.143] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6517", "image": {"bytes": "", "path": "./images/d6c1c4f5-74a7-4451-8dd2-aabada5d7ba1_4993fb7b-d906-4cad-8fa3-13bfe605511b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rental charges for bikes in 37863.\nPrevious actions:\n[link] Find a Store -> CLICK\n[textbox] Enter ZIP or City, State -> TYPE: 37863\n[path] -> CLICK\n[generic] Bike shop Pigeon Forge -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.431, 0.886, 0.561, 0.906] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6518", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_74e08d5f-5c4d-4ca2-9071-4ca8955b2592.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[span] Cannes -> CLICK\n[link] 2022 -> CLICK\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.509, 0.722, 0.54] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6519", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_84ac998d-2de1-42a7-802a-4df326c3ff1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\n[textbox] From -> TYPE: WASHINGTON\n[option] Washington, DC - Union Station (WAS) -> CLICK\n[textbox] To -> TYPE: NEW YORK\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... 
-> CLICK\n[textbox] Select a trip start date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.221, 0.73, 0.242] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6520", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_b0b71afe-6f0d-43ef-84e1-9739bd184012.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.115, 0.953, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6521", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_25ed6cca-1f43-4bd0-a185-8d4db5e858e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.28, 0.84, 0.315] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_6522", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_c88d4a1e-9abf-487f-886a-34ca0b837800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\n[button] Close -> CLICK\n[button] More -> CLICK\n[button] Add to playlist -> CLICK\n[button] Add to playlist -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.407, 0.229, 0.456, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6523", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_15e83256-e4cc-4937-b835-1cf9ab6b1cee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.262, 0.658, 0.452, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6524", "image": {"bytes": "", "path": "./images/8a6f2641-9896-4657-b447-56927420e8c4_8c9d79e2-b2a1-4197-9879-8ef7936e9e85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest educational trade-in Nintendo switch game rated for everyone, and print the details.\nPrevious actions:\n[checkbox] Educational Educational -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK\n[link] Comic Coloring Book - Nintendo Switch -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.962, 0.424, 0.991, 0.435] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6525", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_2a40aabf-98f7-48bd-863b-0f5a3dd0ebb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[span] Black -> CLICK\n[button] Next -> CLICK\n[button] None -> CLICK\n[combobox] CONDITION -> SELECT: Good\n[combobox] TIRE_CONDITION -> SELECT: Good To Go"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.334, 0.284, 0.368] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6526", "image": {"bytes": "", "path": "./images/440273fa-f1b0-41e8-be75-90732ad5170d_ecb3d820-f6e3-4ffb-84c9-e31d01c412bf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Mens size guide for bottoms\nPrevious actions:\n[link] Size Guide -> CLICK\n[link] Men -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.351, 0.557, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6527", "image": {"bytes": "", "path": "./images/ad381d87-53a0-4aa1-a1e5-d6d172a905cd_b211b155-4276-43bf-9669-5973995ff7f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the women cropped tops with lowest price first.\nPrevious actions:\n[link] WOMEN -> CLICK\n[link] Cropped Tops -> CLICK\n[generic] Sort by -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.761, 0.234, 0.943, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6528", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a81330b9-cafd-455a-851e-4c12df331c37.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.328, 0.195, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6529", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_a939b3d8-1cd7-4b45-9cd1-3ef535f86ff7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[menuitem] Women -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.431, 0.133, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6530", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_37627ad6-4621-4127-bbeb-101ffa0b748f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.749, 0.166, 0.86, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6531", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_e2409464-38c3-4846-b44d-16d5e4f8752c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.765, 0.485, 0.78] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6532", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_726244d6-5065-4024-9dad-6bf45baea932.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[link] For the Home -> HOVER\n[link] Rugs -> CLICK\n[link] Washable Rugs Washable Rugs -> CLICK\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.355, 0.034, 0.367] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6533", "image": {"bytes": "", "path": "./images/b7cee0c0-b67d-46b8-bbd1-9f042fe810f5_9d7d0da5-57b8-4690-83ca-7ac5bc0523d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking for 123456 last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.357, 0.353, 0.397] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_6534", "image": {"bytes": "", "path": "./images/1d0dcb84-da65-4830-ba60-cc2b09a52a13_92f2909f-c267-419a-b4e0-2a5ec5b1fae2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find plastic bathroom baskets that are gray from low to high prices near zip code 60173.\nPrevious actions:\n[checkbox] Gray 12 products -> CLICK\n[button] Show filter modal Material -> CLICK\n[span] -> CLICK\n[span] Plastic -> CLICK\n[button] Show sorting options modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.272, 0.619, 0.307, 0.657] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6535", "image": {"bytes": "", "path": "./images/7f0d7056-07f8-48b3-8093-e48abb301018_dc4a1187-c24d-4f06-bcad-066836cd1a30.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is the cheapest luxury car to pickup on the second closest nearby location to New York, United States, 100\nPrevious actions:\n[link] Luxury Car Rental -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: New York, United States, 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.008, 0.043, 0.426, 0.064] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6536", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_7c35e712-8cec-40c1-baa4-93011d5f7d76.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.433, 0.139, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6537", "image": {"bytes": "", "path": "./images/716ed90e-a138-452e-b5b5-167911871fda_4a544b84-9172-41ad-aa8a-e19736c63137.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Playstation gift card of $10.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: playstation gift card $10\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.319, 0.179, 0.472] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6538", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_6b765434-a76e-4653-b427-7662d96ba478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[svg] -> CLICK\n[div] -> CLICK\n[checkbox] Avis -> CLICK\n[button] More filters -> CLICK\n[checkbox] 4+ doors -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.334, 0.551, 0.373, 0.56] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_6539", "image": {"bytes": "", "path": "./images/db72bae1-ff16-495c-89c7-1cff7c0ae503_081badf7-327c-4983-a9e2-7f77d44cb4f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the most popular Spanish recipe book available in paperback that are in stock.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... -> TYPE: recipe\n[link] recipe book -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.285, 0.196, 0.31] in the image\nAnd my action is Action: SELECT\nValue: In Stock (7,640)"}]}, {"id": "mind2web_6540", "image": {"bytes": "", "path": "./images/ddcbce13-4c42-4c40-99ab-2c58588f1ccf_f053b82b-ae9c-4cf9-9b60-45a18358f2bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Mens orange color Jacket and parkas with L Size and add to cart\nPrevious actions:\n[link] MEN -> HOVER\n[link] Jackets & Parkas -> CLICK\n[heading] Pocketable UV Protection 3D Cut Parka -> CLICK\n[checkbox] ORANGE -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.815, 0.186, 0.843, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6541", "image": {"bytes": "", "path": "./images/b3a28e48-3912-4b0e-b3a9-d359da13864d_7f52cfc8-106d-40e8-ba47-2b67a7d462ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL passing touchdown season stats.\nPrevious actions:\n[link] NFL . -> HOVER\n[link] Stats . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.414, 0.125, 0.428] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6542", "image": {"bytes": "", "path": "./images/92ba0696-bc54-41c9-8ddf-1a2f44420c6f_ca113dc2-d281-4cf9-9793-9122d1170097.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan an accessible trip from empire state building to little Caribbean with least walking\nPrevious actions:\n[listitem] Little Caribbean, Brooklyn, NY, USA -> CLICK\n[button] Open Travel Preferences modal. -> CLICK\n[combobox] Minimize my -> SELECT: Walking\n[button] Done button - Press enter key to submit travel pre... 
-> CLICK\n[label] Accessible Trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.441, 0.359, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6543", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_b5eb98db-7b56-403f-8497-7bec0ac4fd57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[combobox] Pick-up time -> SELECT: 9:00 am\n[combobox] Drop-off time -> SELECT: 6:00 pm\n[button] Search -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.25, 0.045, 0.258] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6544", "image": {"bytes": "", "path": "./images/4c997e24-d185-4730-84d6-f8cb512f4c03_7dd2e5c2-af14-4f5c-b147-58913a3bb612.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for car options to pick up at any location near 10023 on april 12 at 3 am\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: 10023\n[span] , 10023 -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 12 -> CLICK\n[combobox] Pick Up Time -> SELECT: 3:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.337, 0.567, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6545", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_747b1dea-1bd9-469d-8c76-54d19e3895ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK\n[checkbox] Beachfront -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.121, 0.089, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6546", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_06195ec9-afde-4c23-a8d3-db3666bc04a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL FROM -> CLICK\n[button] Seattle, WA -> CLICK\n[span] Dates -> CLICK\n[button] September 2023 -> CLICK\n[button] DURATION -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.378, 0.476, 0.616, 0.5] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6547", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_91007a2c-d94c-4c8d-ad80-1b8ca2c877db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.354, 0.539, 0.366] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6548", "image": {"bytes": "", "path": "./images/987b25a8-05af-4b18-bd5b-7e2ccba5fb1e_73a60d20-1d3e-4b8a-bbea-d906c3a1faeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Western Digital internal SSD with 1TB storage and 8GB Ram DDR to the cart.\nPrevious actions:\n[button] \uf002 -> CLICK\n[link] Western Digital WD_BLACK SN770 M.2 2280 1TB PCIe G... -> CLICK\n[button] ADD TO CART \uf0da -> CLICK\n[button] NO, THANKS -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.282, 0.008, 0.39, 0.025] in the image\nAnd my action is Action: TYPE\nValue: 8GB Ram"}]}, {"id": "mind2web_6549", "image": {"bytes": "", "path": "./images/36ab5d78-2f6b-47a6-ad88-3f6c8fdc3513_6d6e210e-ec5d-44db-bfde-0d32e4dd500a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the movies playing today at the theater closest to zip code 10001\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[textbox] Find A Theatre -> TYPE: 10001\n[generic] Submit Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.444, 0.14, 0.458] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6550", "image": {"bytes": "", "path": "./images/867dc9d1-5017-4871-a52a-a1511f239628_8cae68b6-ff63-4283-8f18-a3a8e7ba48bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse movie trailers currently in theaters with \"fresh\" rating.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Trailers -> CLICK\n[link] VIEW ALL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.539, 0.182, 0.656, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6551", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6cf7c15e-7f95-413a-b4d7-01e26c009e43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.09, 0.58, 0.147, 0.608] in the image\nAnd my action is Action: TYPE\nValue: 400"}]}, {"id": "mind2web_6552", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_c0865eda-5f46-4b7d-bb9e-a5592539ec17.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.415, 0.149, 0.433] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6553", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_b46bd2d8-3838-4c77-9166-af6ada07da63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.225, 0.011, 0.776, 0.063] in the image\nAnd my action is Action: TYPE\nValue: organic strawberries"}]}, {"id": "mind2web_6554", "image": {"bytes": "", "path": "./images/8e7b05d4-7dfb-4345-af2b-3e1dcd1c2ea2_413185f7-a793-4718-93ff-3c654b9f1c1d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a map with charging stations in London.\nPrevious actions:\n[button] Menu -> CLICK\n[link] Charging -> CLICK\n[link] Find Us -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.097, 0.216, 0.13] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_6555", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_4f6b4130-a0b4-40e7-b91f-ffe8a6443083.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[textbox] mm/dd/yyyy -> CLICK\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.695, 0.27, 0.891, 0.304] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_6556", "image": {"bytes": "", "path": "./images/d6a0ab7c-f0d7-4ff7-9d42-55b44b5e7f74_32e030ec-b522-40ed-9217-95c09cb73aee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the artist profile of M S Subbulakshmi and play all from the top tracks.\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: m s subbulakshmi\n[button] Search -> CLICK\n[link] M.S. Subbulakshmi -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.409, 0.192, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6557", "image": {"bytes": "", "path": "./images/e92cd371-0bd2-4ac3-82e4-7fdaac46b626_9bf58be2-cf7a-4732-af73-5e7c17b70540.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add zyrtec to the cart for pickup at the nearest CVS to zip code 90028\nPrevious actions:\n[combobox] Search products and services -> TYPE: zyrtec\n[button] Search for zyrtec -> CLICK\n[img] Zyrtec 24 Hour Allergy Relief Tablets with 10 mg C... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.26, 0.381, 0.341] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6558", "image": {"bytes": "", "path": "./images/4aaf59c0-889a-406c-b0c2-454d7670e04f_fbede47e-7ae1-4ed2-9cc9-0d0d7a55577a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the TSA Guidelines to travel with Children\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Traveling with children -> CLICK\n[link] sit on a parent\u2019s lap -> CLICK\n[link] approved car seat -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.343, 0.918, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6559", "image": {"bytes": "", "path": "./images/1efdcd9d-ebc6-4bb7-8823-e54dfe25f409_a03b4d90-3f1b-4a34-a8ac-dda1d6e458e1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a reservation for three guest on April 5th, 5pm in Alinea restaurant, Chicago, IL\nPrevious actions:\n[span] Chicago, IL - Lincoln Park -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[button] Go to next month -> CLICK\n[button] 2023-04-05 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.704, 0.333, 0.908, 0.358] in the image\nAnd my action is Action: SELECT\nValue: 5 00 PM"}]}, {"id": "mind2web_6560", "image": {"bytes": "", "path": "./images/8b743c63-2a99-4c29-93ef-af920dab9535_73592b67-ffcd-4021-8342-1ec06c4a56ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse monthly parking rates near the Museum of Modern Art.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: MOMA\n[option] Museum of Modern Art (MoMA) \u00a0\u00a011 West 53rd St, New... -> CLICK\n[link] MONTHLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.209, 0.53, 0.328, 0.557] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6561", "image": {"bytes": "", "path": "./images/b1fa9bb3-6e2c-485b-90f1-2a54510bf358_9124ca10-aa2c-45cb-870b-29a580fbb2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking near Disneyland that has EV charging.\nPrevious actions:\n[span] Enter an Address or Venue to find parking -> TYPE: Disneyland"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.362, 0.754, 0.389] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6562", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_4cadd81b-2ad9-43cc-a6ae-7785bf77b8b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles \n[div] Los Angeles -> CLICK\n[path] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.131, 0.237, 0.158] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6563", "image": {"bytes": "", "path": "./images/d0ac6860-23f7-40c1-b30d-12269470d2c3_06e9c069-d60e-483d-936f-6a14544521fd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check prices of organic strawberries with three different stores and add the one with lowest price to cart for 94105.\nPrevious actions:\n[span] organic strawberries -> CLICK\n[img] Organic Strawberries -> CLICK\n[span] All stores -> CLICK\n[textbox] Search -> TYPE: organic strawberries\n[span] organic strawberries -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.427, 0.154, 0.443] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6564", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_3dcc458a-5ca1-4057-a628-51580cd69e87.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[span] London Paddington -> CLICK\n[textbox] Date use format: 25-Mar-23 -> CLICK\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.193, 0.327, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6565", "image": {"bytes": "", "path": "./images/8b6eed27-39b4-4dbc-97d3-eeec13a8817d_c8b5a410-7058-493a-bb19-342825c78916.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a roundtrip on July 1 from Mumbai to London and vice versa on July 5 for two adults and a 12-year-old in premium economy and if a flight is not available, search through the calendar for available flights.\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: LONDON\n[button] London, GB (LON - All Airports) -> CLICK\n[textbox] Depart -> CLICK\n[button] Saturday, July 1, 2023 -> CLICK\n[button] Wednesday, July 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.227, 0.481, 0.251] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6566", "image": {"bytes": "", "path": "./images/c0eeead1-f8ea-4819-a6da-ef0108b40c89_df415b14-e121-43a8-8548-058989210645.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign Allan Smith for email offers with the email allan.smith@gmail.com and zip code 10001\nPrevious actions:\n[a] -> CLICK\n[textbox] First Name (required) -> TYPE: Allan\n[textbox] Last Name (required) -> TYPE: Smith\n[textbox] Email Address (required) -> TYPE: allan.smith@gmail.com\n[textbox] Confirm Email Address (required) -> TYPE: allan.smith@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.516, 0.319, 0.754, 0.343] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_6567", "image": {"bytes": "", "path": "./images/dadb0251-a77c-45d7-aacb-0bb2e70e2b56_749c3a9d-c4aa-4517-8e1f-ec5fa5845eb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find food delivery services of American cuision in New York for 4 people on March 20, 8 pm.\nPrevious actions:\n[combobox] Time -> SELECT: 8:00 PM\n[combobox] Size -> SELECT: 4 guests\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] American -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.732, 0.424, 0.812, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6568", "image": {"bytes": "", "path": "./images/50f1e384-4b63-4827-a670-cd19f5a2c710_72893745-5e04-4e66-8557-81416454ade4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find schedule for Long Island Rail Road & Metro-North Railroad from Bay Shore to Breakneck Ridge on Thu, Mar 23, 08:37 AM.\nPrevious actions:\n[link] Schedules -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.259, 0.474, 0.291] in the image\nAnd my action is Action: TYPE\nValue: Bay Shore"}]}, {"id": "mind2web_6569", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_0c87b2bb-027a-4daa-867f-bc0d3d2382fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.845, 0.324, 0.966, 0.36] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_6570", "image": {"bytes": "", "path": "./images/cf361c84-6414-4b05-a7a1-77383997150a_7d52e0c3-c338-4214-889f-318d4ce15d5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get an SUV with an additional driver and wifi for pick up in any rental location near Washington regional airport on June 1, 11 am, and drop off at Washington international airport on June 2, 11 am, and pay for the booking instantly.\nPrevious actions:\n[generic] Vehicle Type * -> CLICK\n[p] SUVs & Wagons -> CLICK\n[button] Select My Car -> CLICK\n[link] Pay Now -> CLICK\n[checkbox] $21.99/Day -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.56, 0.777, 0.594] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6571", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_ba7b7ba5-9604-43af-8fbe-fea243c8bb58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.095, 0.04, 0.106, 0.047] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6572", "image": {"bytes": "", "path": "./images/981fdb06-2352-439f-a6d0-ccaa857c7a54_609108a1-eaaf-4f18-8442-8fec437811f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the trade-in options for PS4.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: PlayStation 4 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.203, 0.027, 0.378, 0.045] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6573", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_7d2559d3-fa15-4fb3-ac78-fdbd51dcc976.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.195, 0.18, 0.829, 0.189] in the image\nAnd my action is Action: TYPE\nValue: seattle"}]}, {"id": "mind2web_6574", "image": {"bytes": "", "path": "./images/d1942a73-745f-44c5-ba30-0d0c7925f5d2_8203c2a7-4502-4ff2-ac52-1440d2c847ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comedy movie streaming on Netflix and add to watchlist.\nPrevious actions:\n[link] MOVIES -> HOVER\n[link] Netflix streaming -> CLICK\n[svg] -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.285, 0.43, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6575", "image": {"bytes": "", "path": "./images/f8089c50-e80c-4532-82a1-96009f485c57_3484246f-8de9-4c54-884b-0ffcaf153cb2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Women's t-shirt for $40-$60 and add it to Wishlist, stop at the Login screen.\nPrevious actions:\n[link] WOMEN -> CLICK\n[tab] Tops -> CLICK\n[link] T-Shirts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.475, 0.488, 0.494, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6576", "image": {"bytes": "", "path": "./images/8e1a344d-a27b-4613-8863-7afad0829b23_1295eaa5-670e-4aa2-b430-998bd21fda2b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest washable blue rug under 2'x3' size.\nPrevious actions:\n[span] Color -> CLICK\n[link] Blue (237) -> CLICK\n[span] Size -> CLICK\n[link] Under 2'x3' (38) -> CLICK\n[link] Sort by: Featured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.808, 0.303, 0.974, 0.331] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6577", "image": {"bytes": "", "path": "./images/74e653f0-aeea-4f01-8d80-c5846fae0c1c_ad513ef7-dd90-4c7b-b00b-ec97683ed0fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the Recommended Gaming PCs for someone who plays Fortnite, Overwatch and GTA V at 4k\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Gaming PC Finder Gaming PC Finder -> CLICK\n[div] Remove -> CLICK\n[div] Remove -> CLICK\n[p] Remove -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.642, 0.494, 0.72, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6578", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_ab469efb-c6dc-47f7-9426-0938350e8063.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\n[link] Live TV -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK\n[textbox] Search -> TYPE: 99201\n[button] Antenna -> CLICK\n[button] Broadcast TV Spokane (3) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.241, 0.779, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6579", "image": {"bytes": "", "path": "./images/13d78369-994a-4202-a9ed-8361e1fdea9d_90936bb2-dd00-403c-b782-4b006604b686.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse bluetooth headphones with active noise-cancellation.\nPrevious actions:\n[searchbox] Search Site -> TYPE: headphones\n[button] \uf002 -> CLICK\n[span] Active -> CLICK\n[button] APPLY -> CLICK\n[span] Bluetooth -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.619, 0.192, 0.636] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6580", "image": {"bytes": "", "path": "./images/8e3c7ad0-f138-490e-918e-42bcce9f55a9_af945b27-92ce-453c-9c39-59ce63a8190a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a harry potter book to my wishlist.\nPrevious actions:\n[textbox] Search for books by keyword / title / author / ISB... 
-> TYPE: Harry Potter"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.754, 0.056, 0.863, 0.08] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6581", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_671088e4-09b7-41ef-b93b-264eead46e7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK\n[checkbox] Resident Evil Resident Evil -> CLICK\n[button] Done -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.33, 0.177, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6582", "image": {"bytes": "", "path": "./images/d311891e-82b9-4a16-ab46-6af92f054f94_51f6ca95-3089-4b4d-b3e4-ccb627fba834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check if 5pm is available to book a indoor reservation on 31st of March in Matthews Winery for 5 people\nPrevious actions:\n[span] Matthews Winery -> CLICK\n[link] Book now Indoor Reservation, Two-hour indoor seati... -> CLICK\n[button] 2023-03-31 -> CLICK\n[i] -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.462, 0.202, 0.5, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6583", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_fe43de0e-b79f-4f44-b856-0087c33dbbf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.056, 0.615, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6584", "image": {"bytes": "", "path": "./images/7a40f4c8-7aa6-4be4-abbe-24a0b511dc0e_d9556517-1528-497f-b701-9edc906c9343.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for hiking boots and filter the results to show only those with a waterproof rating of at least 3 stars.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.085, 0.128, 0.119] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6585", "image": {"bytes": "", "path": "./images/e224beac-a998-41e4-abd1-5370196ce9df_e62f4bfc-502c-4f11-a37e-f64cc8febe6e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the cheapest led smart home indoor strip lighting, add it to the cart, and schedule the delivery.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.01, 0.038, 0.111, 0.055] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6586", "image": {"bytes": "", "path": "./images/9ed8cd2a-b0f8-4ccf-9d48-1ddfba4f5e58_5bf6ca81-46f9-4681-b086-fc23bd8fb027.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find motorcycle parking near Radio City Music Hall.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.252, 0.592, 0.277] in the image\nAnd my action is Action: TYPE\nValue: radio city music hall"}]}, {"id": "mind2web_6587", "image": {"bytes": "", "path": "./images/d29fd2a4-2305-4276-8a0e-2599291d0a17_9d01e4d4-cf3a-47ce-98a3-26f66887e574.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the list of reviews I wrote about my games.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.482, 0.0, 0.589, 0.048] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6588", "image": {"bytes": "", "path": "./images/504c0c6b-7e78-4bfa-ae3f-00f8e59c3693_bf663b13-de7d-4099-9aa6-cdd33f15c1f3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking lot in Gloucester and book a ride from there to North Plymouth on April 28, 2:30 pm, view the map to understand the route better.\nPrevious actions:\n[em] Plymouth -> CLICK\n[button] Depart at 8:20 AM, 4/10/23 \uf107 -> CLICK\n[LabelText] Monday, April 10, 2023, click or press the enter o... 
-> CLICK\n[gridcell] Friday, April 28, 2023 -> CLICK\n[combobox] Hour -> TYPE: 2"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.471, 0.246, 0.506] in the image\nAnd my action is Action: TYPE\nValue: 30"}]}, {"id": "mind2web_6589", "image": {"bytes": "", "path": "./images/90a0f029-c1af-4dd7-8ccd-c4c2a74e8731_32d68cd0-e6e1-470e-aa15-4f34d99a7f9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a basic economy flight + hotel for an award travel from las vegas to san francisco leaving and returning on any date on april for 1 traveler and one room\nPrevious actions:\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: san francisco\n[span] San Francisco, CA -> CLICK\n[textbox] Departure -> CLICK\n[button] Tuesday, April 11, 2023 -> CLICK\n[button] Friday, April 14, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.628, 0.622, 0.653] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6590", "image": {"bytes": "", "path": "./images/ccd8d9f9-65f1-42ac-a699-5db7e380c760_36d6ee3c-4be7-4367-a004-181ac15edf7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Washington airports to New York airports and only show those with scheduled status.\nPrevious actions:\n[link] Flight Tracking -> HOVER\n[link] Flight Finder -> CLICK\n[textbox] Origin -> TYPE: Washington\n[div] Washington -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.504, 0.208, 0.909, 0.235] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6591", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_2c6307f7-6697-4a4f-8e2e-73682ce6f1a3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM\n[select] 04:30 PM -> SELECT: 04:30 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.25, 0.915, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6592", "image": {"bytes": "", "path": "./images/0fb1c90e-a494-4de9-86ac-d4e5366869c4_65a752cb-43ca-4607-bc88-ec49b8285742.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a virtual consultation for Solar Roof. The name is James Smith. The address is 123 st rd. The emial address is abc@abc.com\nPrevious actions:\n[link] Solar Roof -> CLICK\n[link] Schedule a virtual consultation -> CLICK\n[textbox] First Name -> TYPE: James"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.459, 0.355, 0.471] in the image\nAnd my action is Action: TYPE\nValue: Smith"}]}, {"id": "mind2web_6593", "image": {"bytes": "", "path": "./images/a2b159c8-86d0-4b76-9244-f19cc41d5ad8_519c155a-6823-4c59-8683-a50bb52b637a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Medical book related to Diseases & Disorders to basket.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.021, 0.059, 0.129, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6594", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_b3929fbc-0cb0-4e73-8144-eab7ac9ebb5e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes\n[input] -> CLICK\n[div] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.427, 0.102, 0.439] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6595", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_5f14babf-98ed-4a84-a458-84233cd7bb3a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. 
It is a family trip organized by Johnson.\nPrevious actions:\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.592, 0.222, 0.781, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6596", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_2be3b5da-bcc5-4de4-b691-c7115cd419f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[textbox] Email address -> TYPE: buckeye.foobar@gmail.com\n[textbox] Confirm email address -> TYPE: buckeye.foobar@gmail.com\n[input] -> TYPE: 1111111111111111\n[textbox] Address 1 -> TYPE: the home of joe bloggs\n[textbox] City -> TYPE: new york"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.401, 0.95, 0.464] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_6597", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_443327ef-bef2-4f4c-8aa3-77669cbad78a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california\n[button] Search. -> CLICK\n[div] Modesto -> CLICK\n[link] Visit Modesto store details. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.164, 0.46, 0.187] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6598", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_fad40521-7262-4bf8-9611-be44c197681a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK\n[searchbox] Search by city... -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.246, 0.266, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6599", "image": {"bytes": "", "path": "./images/013faefc-26df-4eeb-be59-5638c5f9dc72_c696d8ea-3bb7-4e69-9be9-d9f7228436db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a room at the Marriott Bonvoy resort in the Caribbean for a 10-day stay starting on May 5th.\nPrevious actions:\n[textbox] \ue900DESTINATION WHERE CAN WE TAKE YOU?SEARCH FROM CUR... -> TYPE: Carribbean"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.358, 0.067, 0.661, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6600", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_abe9b06e-30c5-4200-a4da-afbd766799c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\n[li] May -> CLICK\n[combobox] How many guests? -> SELECT: 1 Guest\n[button] SEARCH DEALS -> CLICK\n[button] Filter -> CLICK\n[button] Pet-Friendly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.435, 0.296, 0.502, 0.311] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6601", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_cde21781-bb33-4185-a6ab-f03a12216547.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[img] james9091 -> CLICK\n[link] Playlists -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.49, 0.373, 0.588, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6602", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_994e0184-cc07-44d1-b721-977f549fd4a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.125, 0.418, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6603", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_608512bb-519f-407a-a619-1eeffcb9d896.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[label] Lowest price -> CLICK\n[button] Back to all categories -> CLICK\n[button] Fuel Type -> CLICK\n[listitem] Gas (45) Gas (45) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.268, 0.253, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6604", "image": {"bytes": "", "path": "./images/749dfeeb-8293-4c25-9cad-84f1a93f165d_602e1f0b-b86e-4f60-8c92-40b3aece1274.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find EV charging supported parking closest to Smithsonian museum on Apr 9 between 9am to 6pm\nPrevious actions:\n[textbox] Search for parking -> TYPE: Smithsonian\n[li] Smithsonian National Air and Space Museum, Indepen... -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.697, 0.452, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6605", "image": {"bytes": "", "path": "./images/d9ab19b6-fecb-4545-ab5d-fabc94c27ff6_572f791d-97d7-4e35-adf9-34762045fb72.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a premium luxury car closest to North Las Vegas on April 29 at 6 pm and drop it off at the same location and the same day at 11 pm, supplier should be Avis if available.\nPrevious actions:\n[svg] -> CLICK\n[link] Luxury Car Rental -> CLICK\n[label] Pick-up location -> TYPE: north las vegas\n[div] North Las Vegas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.444, 0.116, 0.548, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6606", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_5be16eae-be60-40bd-ab2c-acfab3a0cd36.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[svg] -> CLICK\n[checkbox] 15 June 2023 -> CLICK\n[checkbox] 20 June 2023 -> CLICK\n[span] 2 adults -> CLICK\n[button] - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.827, 0.213, 0.927, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6607", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78bfd4cf-87db-44f9-9bc0-a390988df75b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. 
Email: joe@bloggs.com\nPrevious actions:\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day\n[div] Hackney Clothes Swap - Earth Day -> CLICK\n[button] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.268, 0.573, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6608", "image": {"bytes": "", "path": "./images/6678924a-0a3d-4c8b-ac35-3ba0aceea4d5_40298f0b-61e7-46f8-bb2e-5c1acb88b464.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Golden State Warriors Player List.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Golden State Warriors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.065, 0.931, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6609", "image": {"bytes": "", "path": "./images/b2c18588-b115-4937-b69f-8feeab22c387_1d9aa0c2-db0a-47b1-9e38-267bea54a66c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate the list of movies \"at home\", sorted by most recent.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.176, 0.587, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6610", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_db6d2722-20ac-437d-ba01-65b4408ee420.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[link] 25 -> CLICK\n[listbox] hour -> SELECT: 23\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.088, 0.494, 0.391, 0.523] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6611", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_3565c84e-3f3a-4a37-9d8c-1bfcae9e432f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: london"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.065, 0.326, 0.081] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6612", "image": {"bytes": "", "path": "./images/ddee9314-5a29-4258-b11b-b6432ec719f1_05ffdddd-205e-48b4-9f8a-65ff0ac005ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all free virtual business start-up events happening tomorrow in Las Vegas and follow the organizer with most followers.\nPrevious actions:\n[combobox] autocomplete -> TYPE: LAS VEGAS\n[div] Las Vegas -> CLICK\n[svg] -> CLICK\n[div] Tomorrow -> CLICK\n[p] Startups & Small Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.127, 0.476, 0.26, 0.551] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6613", "image": {"bytes": "", "path": "./images/b02e47ac-c1a6-4f5c-886f-f32af745e7f3_9f90d836-c2d6-473c-9f78-705033f9ec52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View a reservation made under the last name Walker in Australia for a car using the reservation confirmation number A987654.\nPrevious actions:\n[link] close dialog -> CLICK\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: AUSTRALIA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.305, 0.347, 0.557, 0.387] in the image\nAnd my action is Action: TYPE\nValue: Walker"}]}, {"id": "mind2web_6614", "image": {"bytes": "", "path": "./images/b307117b-e10c-470f-a85d-968b2e442b19_cc41d893-f1d0-4303-b3a7-b19fc73f05a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a coffee shop with wi-fi.\nPrevious actions:\n[textbox] Find -> TYPE: coffee shop\n[span] Coffee Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.162, 0.363, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6615", "image": {"bytes": "", "path": "./images/2e15efa0-f646-4dc4-9ce0-2e3d155c788b_8e68485f-08ab-473e-9845-e5fb8af0833b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated mover in Honolulu to shift a vehicle and large appliances out of state and who has virtual discussion options available.\nPrevious actions:\n[link] Home Services -> HOVER\n[span] Movers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.36, 0.018, 0.564, 0.029] in the image\nAnd my action is Action: TYPE\nValue: HONOLULU"}]}, {"id": "mind2web_6616", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_56e1e554-8541-4c1a-a4bb-d8ad1f7b95cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[link] Search for cars -> CLICK\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.167, 0.573, 0.205] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6617", "image": {"bytes": "", "path": "./images/13a676be-2f4f-4abf-83fb-4ab641793801_a7e54311-1339-4fde-a1b9-2571f6f85d29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a list of Administrative and Clerical jobs MTA is currently recruiting for in Brooklyn\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.912, 0.353, 0.921] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6618", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_99c101a0-07d4-433d-86f7-9d16d464c14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[button] 11:30 -> CLICK\n[button] 11:00 -> CLICK\n[button] Let's go -> CLICK\n[link] Large cars 5\u00a0Seats 3 Large bags -> CLICK\n[ins] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.16, 0.771, 0.34, 0.802] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6619", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_637682df-5b80-4530-a3eb-e242bef29336.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK\n[link] Upcoming Games -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.412, 0.13, 0.565, 0.152] in the image\nAnd my action is Action: SELECT\nValue: PS5"}]}, {"id": "mind2web_6620", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_2dfa3c40-55c0-42ec-b141-50b68a77b02a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John\n[input] -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.56, 0.223, 0.688, 0.241] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6621", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_290eda81-bc60-42d8-95be-e11a2a5de824.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[p] Consoles -> CLICK\n[searchbox] Find values for games and more -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.141, 0.932, 0.165] in the image\nAnd my action is Action: TYPE\nValue: PS4"}]}, {"id": "mind2web_6622", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_011c68d2-620f-46fc-a2cd-48650f992b99.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.514, 0.977, 0.556] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6623", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_8066875c-cbad-453f-8371-11f45293f19b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.223, 0.116, 0.573, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6624", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_b2e83bac-a21a-425c-a069-086f2dca47e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Reservations -> CLICK\n[link] Make a Reservation -> CLICK\n[textbox] Enter your pick-up location or zip code -> TYPE: Harry Reid Intl Airport, LAS"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.46, 0.245, 0.477] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6625", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_91d99a7d-e270-4d8e-ac84-4df6ee2ab313.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[span] Exterior Color -> CLICK\n[div] -> CLICK\n[checkbox] Online Paperwork (4)\uf05a -> CLICK\n[span] Vehicle History -> CLICK\n[checkbox] No Accidents (4) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.775, 0.257, 0.888, 0.294] in the image\nAnd my action is Action: SELECT\nValue: Price - Lowest"}]}, {"id": "mind2web_6626", "image": {"bytes": "", "path": "./images/3f62b47b-876b-487e-8681-63e80555938d_7de0d4fb-357b-420e-997e-df792f9099ac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search and view the details of 32-inch medium-light fishing rod for ice fishing available in stock under 100 dollars with an average rating of 4 and above.\nPrevious actions:\n[svg] -> CLICK\n[label] 32\" -> CLICK\n[svg] -> CLICK\n[label] Less than $100 -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.507, 0.981, 0.534] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6627", "image": {"bytes": "", "path": "./images/81b6bba7-3d05-40a4-a475-48eaeacbda0d_aa99e548-cd12-4d60-876a-1b739f2c9009.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show route map and flight cost from Los Angeles to Miami on 12 April.\nPrevious actions:\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.655, 0.114, 0.866, 0.138] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6628", "image": {"bytes": "", "path": "./images/1df0723c-f732-4ecb-b202-8e1854b7c079_819a213b-c14d-4a2b-92d9-438c0755d8da.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse a list of rental vehicles for Brooklyn, OH, US for 18 year old renter.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.187, 0.84, 0.21] in the image\nAnd my action is Action: TYPE\nValue: Brooklyn"}]}, {"id": "mind2web_6629", "image": {"bytes": "", "path": "./images/2c19d467-4e44-4c0d-b050-a13823ca545d_6468eb70-ab1c-4fce-9744-5fe7bfea7cde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find hard side Carry-on Luggage used for business in black color.\nPrevious actions:\n[span] Luggage -> CLICK\n[span] Carry-on Luggage -> CLICK\n[img] -> CLICK\n[svg] -> CLICK\n[button] Color -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.099, 0.777, 0.112] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6630", "image": {"bytes": "", "path": "./images/34e13beb-0235-41d4-b108-137cc7480904_a991029f-599d-41f2-ad44-8fdc236dbc68.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest parking spot for a standard car near 66 Perry St., New York, from April 20, 5:30 pm to April 21, 5:30 am.\nPrevious actions:\n[span] 01:30PM -> CLICK\n[link] 05:30PM -> CLICK\n[span] Apr 20 -> CLICK\n[gridcell] 21 -> CLICK\n[span] 07:30PM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.813, 0.435, 0.937, 0.462] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6631", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_c49c8db3-a45d-4c32-97bc-8a71c035485e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[svg] -> CLICK\n[heading] Supima\u00ae Cotton V-Neck Short-Sleeve T-Shirt (2022 E... 
-> CLICK\n[checkbox] L -> CLICK\n[button] ADD TO CART -> CLICK\n[button] VIEW CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.666, 0.446, 0.959, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6632", "image": {"bytes": "", "path": "./images/c9f2258a-07c3-46ed-a974-01543606b31b_3dc9bc1a-201e-4f46-beb8-69950bc0d565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals for Honolulu from New York with budget of $1300 for premium economy.\nPrevious actions:\n[textbox] Input departure airport -> TYPE: New York\n[textbox] Input arrival airport -> TYPE: Honolulu\n[option] Honolulu, HI, US (HNL) -> CLICK\n[textbox] Budget. Please enter a numerical value -> TYPE: 1300\n[div] Economy -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.491, 0.94, 0.509] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6633", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_4ebb9e93-734c-4664-9f53-e5562feb80e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Cannes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.682, 0.493, 0.705, 0.507] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6634", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_adf9d11d-07e4-4751-8d2e-3cd3ce8f311d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] - -> CLICK\n[div] Mr -> CLICK\n[textbox] First name -> TYPE: Joe\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.846, 0.26, 0.934, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6635", "image": {"bytes": "", "path": "./images/4b431888-9909-40b5-8351-be52905e4d5a_bb8869c7-6b6f-4878-852b-40a52c258f7f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Real Madrid team schedule of the UEFA Champions League championship.\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Real Madrid\n[link] Real Madrid LaLiga -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.11, 0.093, 0.135] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6636", "image": {"bytes": "", "path": "./images/883fc533-d267-4eeb-8ccd-fe77bc28cd9b_be1bb8af-ec73-4160-82af-3279a45e05de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Discover Katy Perry and add bookmark to it and view all tracks.\nPrevious actions:\n[textbox] Search -> TYPE: Katy Perry\n[button] Search -> CLICK\n[a] -> CLICK\n[button] BOOKMARK -> CLICK\n[button] OKAY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.564, 0.67, 0.645, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6637", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_4ea0b732-6a9c-4d03-8da2-9045ecc460b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.093, 0.493, 0.109] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6638", "image": {"bytes": "", "path": "./images/90557510-32dc-415f-8507-41b050594962_0f456a6f-04d4-4bc3-beb1-b377715f08b1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the coming soon AMC Artisan Films\nPrevious actions:\n[link] Visit the See A Movie page -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.209, 0.912, 0.223] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6639", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_3e01a8bb-5799-45c2-b0c9-83891b43492b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.328, 0.333, 0.354, 0.351] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6640", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_5d2eec92-a8c9-436b-a89a-ed85f6174d4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... 
-> TYPE: New york yankees\n[option] New York Yankees -> CLICK\n[link] TICKETS -> CLICK\n[div] More Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.188, 0.761, 0.199] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6641", "image": {"bytes": "", "path": "./images/9572e7bd-0365-4339-899d-1d1dc8504543_7e7b3cd5-ee9e-4a9c-aee6-96220607e196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a lowest priced car for pickup from Birmingham Airport, Alabama, at 11 am on March 22, 2023, and the return location is Montogomery Airport, Alabama, at 5 pm on March 25, 2023.\nPrevious actions:\n[span] 22 -> CLICK\n[button] 03/25/2023 -> CLICK\n[combobox] Return Time Selector -> SELECT: 5:00 PM\n[button] Browse Vehicles -> CLICK\n[combobox] SORT BY -> SELECT: Price: Low to High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.209, 0.914, 0.23] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6642", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_dc038372-b9b4-4b34-9eb0-10f048962d43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[link] Flight + Hotel -> CLICK\n[textbox] Where from? -> TYPE: KATHMANDU"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.13, 0.448, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6643", "image": {"bytes": "", "path": "./images/1538e37b-9c33-48b0-b10e-662e192ad53a_455a4731-6e47-4746-9c56-df4a0cc128eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stops in Alanson, MI\nPrevious actions:\n[button] Explore\ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.062, 0.436, 0.108] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6644", "image": {"bytes": "", "path": "./images/fd0e4520-b47b-4a24-9b21-d10e68c42472_62c7af18-1fb9-4022-a9ec-a69d457cf223.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Braintree to Boston Airport on April 14, 12 pm, and see the fair calculator.\nPrevious actions:\n[combobox] From -> TYPE: brain\n[link] T red line bus commuter rail Zone 2 Braintree -> CLICK\n[combobox] To\u00a0 -> TYPE: boston\n[span] Boston -> CLICK\n[button] Get trip suggestions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.136, 0.25, 0.352, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6645", "image": {"bytes": "", "path": "./images/86afd67c-1bff-455c-baa7-e18dcb64b0f3_91b03325-cd3b-4a7f-b0dd-7a308e18b42b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find comedy tv shows on netflix sorted by audience score.\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[img] netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.246, 0.165, 0.317, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6646", "image": {"bytes": "", "path": "./images/5c91b907-39db-49c3-af73-5eb5c2390a93_643210af-4c59-4a5f-af43-ebfed3c9b5e5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Modesto California and set is as my store.\nPrevious actions:\n[link] Find a store -> CLICK\n[searchbox] Search carmax locations. -> TYPE: california\n[button] Search. -> CLICK\n[div] Modesto -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.109, 0.465, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6647", "image": {"bytes": "", "path": "./images/0db002f6-db7d-4382-9c0d-8f5cef2afa17_22107c45-e852-4ec8-9e35-2609a62c2bbd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find detailed uses of Anuall Pass for 2023 on the USGS Online\nPrevious actions:\n[button] Open Menu -> CLICK\n[link] Passes -> CLICK\n[link] Annual Pass -> CLICK\n[button] Overview of the Annual Pass -> CLICK\n[button] Annual Pass Internet Order Questions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.463, 0.95, 0.486] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6648", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_f320b86a-6600-4949-8b57-4df2b3f9b664.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[tab] Fastest -> CLICK\n[button] See flight -> CLICK\n[button] Select -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.668, 0.617, 0.712] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6649", "image": {"bytes": "", "path": "./images/13f1648c-9de8-497d-bed3-02096206dc5f_960ebc24-1a13-4086-afd4-fd6e30b783fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add two pairs of women's Pink sweet pants in XXL size to your cart and view your cart.\nPrevious actions:\n[checkbox] PINK -> CLICK\n[checkbox] XXL -> CLICK\n[input] -> CLICK\n[option] 2 -> CLICK\n[button] ADD TO CART -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.38, 0.491, 0.401] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6650", "image": {"bytes": "", "path": "./images/8b079ace-b202-4d78-a9b0-4cde39e58934_f2b7effa-1ee4-4c0d-a8c6-c086818a5542.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a vehicle at New York JFK International Airport May 2-May 5 for an owner above 25 years.\nPrevious actions:\n[searchbox] Pick-up & Return Location (ZIP, City or Airport) (... 
-> TYPE: New York JFK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.293, 0.362, 0.526, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6651", "image": {"bytes": "", "path": "./images/6a7eaba3-f6ec-4eba-9e04-ff974b228ef4_580a4333-fdc6-4c07-aa20-748b8b6c9ffb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select an event from the 2019 UFC schedule and show me the full profile from one of the athletes\nPrevious actions:\n[link] MMA -> HOVER\n[link] Schedule/Results -> CLICK\n[select] 2023 -> SELECT: 2019\n[select] All -> SELECT: UFC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.101, 0.409, 0.299, 0.419] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6652", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_068af6bd-86ad-4e0b-8449-467e35cd186b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345\n[button] Add flight -> CLICK\n[textbox] Email address * -> TYPE: ian.lo@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.492, 0.189, 0.519] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6653", "image": {"bytes": "", "path": "./images/998e55fb-66d7-443f-bb73-9cb6a2bc26a8_60fa0498-f3bd-4aec-b81c-d4bc4ee53e24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find used BMW i3 for 10001 zip code with lowest price.\nPrevious actions:\n[menuitem] Buy -> CLICK\n[combobox] Select Make -> SELECT: BMW"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.012, 0.256, 0.324, 0.277] in the image\nAnd my action is Action: SELECT\nValue: i3"}]}, {"id": "mind2web_6654", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_ad4e03b0-1b4d-4943-b2d9-94ab2b408563.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.252, 0.464, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6655", "image": {"bytes": "", "path": "./images/06a6d90b-071d-4644-8372-b928e99ae970_b69043a4-7684-42c4-b359-62bb1badccce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find two rooms in a cheapest hotel in Niagra Falls for three adults and one three year old kid from May 17 to May 20, view only available hotels within 100 to 200 dollar range with taxes and fees, and choose the cheapest hotel that offers free breakfast and a pool.\nPrevious actions:\n[checkbox] Show rates with taxes and all fees -> CLICK\n[checkbox] Show available hotels only -> CLICK\n[link] Free breakfast (18) -> CLICK\n[link] Pool (18) -> CLICK\n[generic] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.325, 0.611, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6656", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_b4e1ca8e-3b28-4edf-ad6a-992c4ed20441.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[div] Availability -> CLICK\n[checkbox] In Stock In Stock -> CLICK\n[button] Done -> CLICK\n[button] Best Matches -> CLICK\n[link] Price Low To High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.449, 0.177, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6657", "image": {"bytes": "", "path": "./images/db53ba89-9cff-441a-a703-053e89e32961_da1da5de-72e9-43ca-93c7-7638f6b66736.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find superhero sci-fi movies based on comic books rated 7 to 9, and add the three movies with the most votes to my watchlist.\nPrevious actions:\n[path] -> CLICK\n[span] Browse Movies by Genre -> CLICK\n[link] Superhero -> CLICK\n[checkbox] Superhero Sci Fi (745) -> CLICK\n[checkbox] Based On Comic Book (226) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.237, 0.313, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6658", "image": {"bytes": "", "path": "./images/9cc81f50-465c-4d77-8fd7-d81fe1ffde84_f04f3bd9-068a-491b-a24c-c356de9dfcc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the channel number for the local CBS in Spokane Washington in 99201.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.608, 0.217, 0.652, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6659", "image": {"bytes": "", "path": "./images/73960473-941b-4e09-8eb1-f059b85c2dba_b88c0362-1340-41c4-be55-38cf7e7c180d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of one-way flights today (April 17) from Chicago to Paris.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.348, 0.28, 0.387] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_6660", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_e65a5c29-d512-478a-99f5-ff82dcc22246.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.726, 0.122, 0.77, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6661", "image": {"bytes": "", "path": "./images/77be98ff-e4db-4745-9b87-6ce69754c4c2_5b2e46d2-a778-4bd0-9ba5-fbd526c7c17e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest wheelchair accessible parking for the Pittsburgh Pirates at St. Louis Cardinals event in Busch Stadium on Apr 13.\nPrevious actions:\n[li] Busch Stadium, Clark Avenue, St. Louis, MO, USA -> CLICK\n[p] Pittsburgh Pirates at St. Louis Cardinals -> CLICK\n[button] Filter -> CLICK\n[checkbox] Wheelchair Accessible (10) -> CLICK\n[button] Show 10 Results -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.404, 0.397, 0.417] in the image\nAnd my action is Action: SELECT\nValue: Sort by Price"}]}, {"id": "mind2web_6662", "image": {"bytes": "", "path": "./images/bd4b77db-00a5-405f-bf0a-a4d168967d64_c649a304-2019-4ff6-9bab-817b700a4e2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find Panini Diamond Kings Baseball cards set below $25.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.098, 0.646, 0.133] in the image\nAnd my action is Action: TYPE\nValue: Panini Diamonds Kings Baseball cards"}]}, {"id": "mind2web_6663", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_e07fd3ab-03a2-4115-baef-7334d62c7687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[div] -> CLICK\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.046, 0.97, 0.069] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6664", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_405b8bdc-3279-4507-8ed8-b6102b66252e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[span] Pick a date... 
-> CLICK\n[button] 15 -> CLICK\n[button] 16 -> CLICK\n[div] Petaluma Music Festival -> CLICK\n[button] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.51, 0.149, 0.573, 0.169] in the image\nAnd my action is Action: SELECT\nValue: 2"}]}, {"id": "mind2web_6665", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_74dcfb10-d052-4f30-b65c-a2a06d3bb983.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.74, 0.015, 0.891, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6666", "image": {"bytes": "", "path": "./images/3a231c03-d038-4b61-b5a2-685b77d9920e_ef0e466a-4175-4c3c-80fb-9471d37a9103.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the bag charges for the passenger James Son for the flight with the confirmation number 10000002\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK\n[link] Baggage info -> CLICK\n[link] Baggage fee calculator -> CLICK\n[tab] My flights -> CLICK\n[textbox] Confirmation number (required) -> TYPE: 10000002"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.27, 0.471, 0.446, 0.498] in the image\nAnd my action is Action: TYPE\nValue: Son"}]}, {"id": "mind2web_6667", "image": {"bytes": "", "path": "./images/701bc7ed-4b55-4fd2-b333-128e55c0a4c2_a36b8ff4-cf58-4fa8-9f5c-e3246e288c83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an upcoming game which is releasing in August 2023 for PS5.\nPrevious actions:\n[button] Toggle Sidebar -> CLICK\n[button] Discover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.087, 0.216, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6668", "image": {"bytes": "", "path": "./images/604c4377-e24c-4900-a72f-346d8999d443_341e6e6d-828b-40a6-acfd-90e47191518d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a deal which is highly rated in Fiji location.\nPrevious actions:\n[textbox] Where? -> TYPE: Fiji"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.164, 0.89, 0.184] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6669", "image": {"bytes": "", "path": "./images/112459f9-10ea-44b5-affa-79008c2977e7_fc7ce1dd-bf86-4110-95b6-073aa3e5b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse check in with confirmation number 123456 for John.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.278, 0.082, 0.389, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6670", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_0dffc638-04bd-4d82-87de-2094b4767d4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK\n[link] Girls -> CLICK\n[link] Jackets -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.267, 0.233, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6671", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8deea034-568e-4813-992f-b74bdf900906.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[generic] Site Menu -> CLICK\n[link] icon of Build Your Own Custom PC Build Your Own Cu... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.272, 0.406, 0.395] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6672", "image": {"bytes": "", "path": "./images/cf89b0ae-39ca-4451-80df-260d46c62f21_84bf94c3-3a36-4743-a629-a73720bfa17f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel in Ohio From July 2nd to July 8th for 2 adults.\nPrevious actions:\n[button] Check-in March 18, 2023 -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Next month -> CLICK\n[button] Jul 2, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.952, 0.206, 0.994, 0.228] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6673", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_585cb70c-a451-4298-add8-b19c4b26f1c1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[textbox] Destination -> TYPE: Venice Beach\n[menuitem] Venice Beach, Los Angeles, CA, USA -> CLICK\n[textbox] Event space -> TYPE: 100\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.61, 0.161, 0.648, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6674", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_e0ca183e-3787-4b09-87f3-b7b9079d4b6f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Dubai\n[button] Dubai United Arab Emirates -> CLICK\n[path] -> CLICK\n[button] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.12, 0.103, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6675", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_f720be0a-2053-4046-94b6-7ce03c2d5f6d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Mikal Bridges"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.174, 0.931, 0.207] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6676", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_2861d75f-d51e-42a9-bf74-e1e0fc01fda6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.003, 0.031, 0.02] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6677", "image": {"bytes": "", "path": "./images/708cbe51-b493-41da-afa8-648564133972_b1b1da1b-a169-4ae8-8540-090e1735abfc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Flight schedules and notifications for a flight outside the us from BHZ to EWN leaving on april 30\nPrevious actions:\n[link] Flight status -> CLICK\n[link] Create flight status notification -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.259, 0.264, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6678", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_1f0b8000-4fe1-4b1f-8034-8f8e4023440d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK\n[combobox] Return Time -> SELECT: noon\n[textbox] Return to same location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.351, 0.567, 0.388] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6679", "image": {"bytes": "", "path": "./images/759a1b1b-bb1f-4981-aef4-02a319f8dbaa_34e4693d-d86b-4536-ba7c-274c55c63850.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the profile page for author of latest shot\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.176, 0.061, 0.222, 0.077] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6680", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_b4e28822-72ba-426d-820b-e5984992fff9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.255, 0.219, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6681", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_d5a67096-558b-4e6c-be09-957a0e4ae20e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[textbox] Mileage -> TYPE: 40000\n[button] Go -> CLICK\n[div] XLE Sedan 4D -> CLICK\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.135, 0.564, 0.208, 0.575] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6682", "image": {"bytes": "", "path": "./images/270c18c6-b3cf-4409-ba6b-18160525692f_b756f03a-d366-4b47-a6f4-5acb671698b4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest used and working Nintendo Switch for under $400 that can be bought right now.\nPrevious actions:\n[button] Submit price range -> CLICK\n[input] -> CLICK\n[input] -> CLICK\n[button] Sort selector. Best Match selected. -> CLICK\n[link] Price + Shipping: lowest first -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.316, 0.027, 0.329] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6683", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_9f957e9c-cf83-4dc3-8223-a5da537ceafc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK\n[button] Next -> CLICK\n[gridcell] Mon May 01 2023 -> CLICK\n[gridcell] Sun May 07 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.346, 0.78, 0.364] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6684", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_57ecc027-a48c-4a61-9ffb-931ae1fab2a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[div] -> CLICK\n[button] close -> CLICK\n[heading] Same Day Delivery -> CLICK\n[link] Self-Rising Crust Uncured Pepperoni Frozen Pizza -... -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.603, 0.144, 0.775, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6685", "image": {"bytes": "", "path": "./images/22e33a38-902c-4f62-9e9a-822b2370b6d1_8ff5337a-6643-4d26-88c1-f731c8d15f93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the cheapest exotic car for two from Beverly Hills at 11 am on March 31 and return on the same day at 5 pm.\nPrevious actions:\n[link] Exotic Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.197, 0.15, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6686", "image": {"bytes": "", "path": "./images/789b7d2d-fb01-453c-b933-383965e6123c_e7becd76-12f3-404f-a927-5c51aa736b85.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cast and crew of Titanic and add to watchlist\nPrevious actions:\n[textbox] Search TV Shows and Movies... -> TYPE: Titanic"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.137, 0.594, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6687", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_cd9ce436-abb2-4fd7-aef5-356733c7e1a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.217, 0.435, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6688", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_1ac506ac-dd62-4adb-8cc6-e42e39ea1e35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK\n[combobox] Enter your destination city, airport name, or airp... -> TYPE: Toronto"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.314, 0.582, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6689", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_08b7c08f-c6d0-44a6-95d8-59c4f97021f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK\n[button] Sort By: Best Match -> CLICK\n[button] Price Low to High -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.155, 0.278, 0.162] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6690", "image": {"bytes": "", "path": "./images/cfaa49bd-7943-4da8-ae5f-bb3a15dfaa05_919527a3-b5dd-493f-ab2b-dc9cc35b2cbc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find showtimes for John Wick 4 at a theater in Wichita, Kansas 67226\nPrevious actions:\n[link] Visit the Our Theatres page -> CLICK\n[link] Find a Theatre -> CLICK\n[link] Wichita -> CLICK\n[link] Showtimes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.15, 0.559, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6691", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_f3c95979-f964-4912-8cf9-a627b0322a93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.298, 0.969, 0.315] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6692", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_034c4eb3-eecf-41d3-b403-54797be9544a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Locations -> CLICK\n[link] Find a Location -> CLICK\n[textbox] Search by Airport, City, Zip, Address or Attractio... -> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.272, 0.579, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6693", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_b08e91e1-2c15-4dda-a02d-558267a8292f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK\n[textbox] Last Name* -> TYPE: Lo\n[textbox] Confirmation or ticket number* -> TYPE: 12345678912345"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.226, 0.57, 0.482, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6694", "image": {"bytes": "", "path": "./images/86897828-35e8-4002-a98a-4e1dd26c6edb_c7a0d65f-bad9-4424-90af-42b14680cc05.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket for All star stand up comedy event happen 6pm on the 25th March 2023.\nPrevious actions:\n[link] concerts. -> CLICK\n[textbox] Search by Name -> TYPE: all star stand up comedy\n[p] All Star Stand Up Comedy -> CLICK\n[link] View Tickets -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.727, 0.425, 0.975, 0.466] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6695", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_6e15b8bd-803c-4dd4-8e9e-0b102e3d9a69.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK\n[link] Beginner 554,088 -> CLICK\n[link] Drop C 3,930 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.226, 0.153, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6696", "image": {"bytes": "", "path": "./images/fdd4fc92-3b4d-48a6-9cfe-f66ffed422e6_8159f605-56cd-4c82-8451-7c0f4743d451.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an entire home with a rating above 5 for two in Manila, Philippines from June 1 to 8 with self-catering, double bed, free wifi, and walk-in shower, check the availability, and reserve.\nPrevious actions:\n[span] -> CLICK\n[button] Show all 14 -> CLICK\n[span] -> CLICK\n[button] Show all 25 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.153, 0.263, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6697", "image": {"bytes": "", "path": "./images/acc194d4-2f71-496a-b378-e18ab698ab0f_411a33e1-09da-4e0a-96a5-303cfa86ccae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find reggae concerts this weekend.\nPrevious actions:\n[button] CONCERTS -> CLICK\n[link] Reggae -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.632, 0.245, 0.655, 0.259] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6698", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_3b3620c3-4b5c-44fa-a170-36828db8938a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[button] Departing April 5, 2023 -> CLICK\n[button] Apr 11, 2023 -> CLICK\n[button] Done : (Save changes and close the date picker.) -> CLICK\n[button] Search -> CLICK\n[checkbox] Nonstop (1) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.302, 0.048, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6699", "image": {"bytes": "", "path": "./images/229199b4-9988-485f-8175-b5efd8faf08b_2ab5735a-26cd-414e-b9ed-52d802f1408d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Ask a question regarding Health Insurance Top Up plans beneficial or not?\nPrevious actions:\n[link] QUESTIONS & ANSWERS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.149, 0.075, 0.262, 0.091] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6700", "image": {"bytes": "", "path": "./images/a2b1d94b-5309-4c9b-bcdd-8048172af7b5_794a7443-6027-4bd4-bc18-229028decf0b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find an foldable over-ear headphone under 35 USD in certified refurbished condition with lowest price + shipping.\nPrevious actions:\n[link] Electronics -> HOVER\n[link] Headphones -> CLICK\n[link] Ear-Cup (Over the Ear) -> CLICK\n[button] Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.848, 0.244, 0.989, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6701", "image": {"bytes": "", "path": "./images/70b3ef5b-d900-44cf-9b62-9ecece97954c_a134016b-dd49-4e6d-9c0a-f2a9c11f25f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find climbing gear and sort the results by price high to low.\nPrevious actions:\n[button] Climb -> CLICK\n[button] Climb -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.143, 0.546, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6702", "image": {"bytes": "", "path": "./images/7bda9645-0b5f-470a-8dd7-6af0bff4da68_30732d9f-efee-4ba4-8b2a-a72e47d5bde6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for pickup restaurant available in Boston, NY on March 18, 5pm with just one guest\nPrevious actions:\n[combobox] Reservation type -> SELECT: Pickup"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.499, 0.313, 0.518, 0.325] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6703", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_8084a818-7884-4e70-90c3-6f94536efcb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] Buy -> HOVER\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.381, 0.223, 0.41] in the image\nAnd my action is Action: TYPE\nValue: 10017"}]}, {"id": "mind2web_6704", "image": {"bytes": "", "path": "./images/a8de57df-b0be-400d-9211-931321c6500c_1596c7ad-f09a-48bc-b641-b66197b5b5d0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the best rated ukulele tab to my favorites\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.027, 0.838, 0.041] in the image\nAnd my action is Action: TYPE\nValue: ukulele"}]}, {"id": "mind2web_6705", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_938f123e-a8b0-46fe-82bd-580041111df6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK\n[searchbox] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.183, 0.29, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6706", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_e5be8100-215f-469b-bd1a-791ce30bfe16.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.014, 0.873, 0.057] in the image\nAnd my action is Action: TYPE\nValue: sports wear"}]}, {"id": "mind2web_6707", "image": {"bytes": "", "path": "./images/6c107328-3ff6-4cb3-af16-5f9ca447fd66_aac2e48b-eaab-49e0-8d87-8fdf57bb909a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add documentary \"The Elephant Whisperers\" and \"The Magician's Elephant\" to the watchlist.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Elephant Whisperers"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.15, 0.196, 0.657, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6708", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_43f085f3-b693-48ab-ac7a-3d9c3b9f7af2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[gridcell] Thursday, June 1, 2023 -> CLICK\n[button] Done -> CLICK\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.738, 0.453, 0.944, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6709", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_c3904d4c-b5f4-4a2d-9fdd-68dc50c3227b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] Car rentals -> CLICK\n[searchbox] Please type your destination -> TYPE: mexico city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.34, 0.409, 0.391] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6710", "image": {"bytes": "", "path": "./images/9b18e5e4-4873-49bf-b422-c3f5bc5d1f33_eb8ba29f-90ce-4615-8e5b-92f140b46bf4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a WWE ticket for price range between $50 to $100.\nPrevious actions:\n[link] WWE Tickets -> CLICK\n[link] TICKETS -> CLICK\n[p] $41 - $1,255 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.756, 0.207, 0.796, 0.231] in the image\nAnd my action is Action: TYPE\nValue: 50"}]}, {"id": "mind2web_6711", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_2722ee03-60cc-45cc-8e74-a341b470de12.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.128, 0.846, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6712", "image": {"bytes": "", "path": "./images/dd13bc51-f582-4004-8641-eb4e62cabfc7_460dd5e5-220f-4476-a4fa-639b266566fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get prequalified with a good credit score for a $30000 car with a $6000 down payment for 48 months.\nPrevious actions:\n[menuitem] Prequalify for Financing -> CLICK\n[input] -> TYPE: 30000\n[input] -> TYPE: 6000\n[span] Good -> CLICK\n[span] 48 mo -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.661, 0.787, 0.683] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6713", "image": {"bytes": "", "path": "./images/04ec089f-2ae1-4fa9-bfdf-4dff9ab710ad_58d10ad0-dc54-4dad-9f1e-4e11611a1176.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated white water rafting experience available in Montana from May 1 to May 7\nPrevious actions:\n[textbox] Where to? -> TYPE: Montana\n[button] Montana USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.367, 0.777, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6714", "image": {"bytes": "", "path": "./images/4bc70fa1-e817-405f-b113-0919e8e94205_b1ec99ca-9953-4575-b633-8b9e6da0aee4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add the cheapest Women's Sweaters to my shopping cart.\nPrevious actions:\n[span] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.236, 0.414, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6715", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_d3696fd4-af25-471e-851b-6b0f1e991970.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[div] open -> CLICK\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.593, 0.278, 0.606] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6716", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_5bf7212f-6896-4585-addb-9f5a65a58eb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.051, 0.054, 0.084] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6717", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_bc27f335-5b49-47ef-8632-88d20acb5da2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK\n[textbox] Vehicle Price -> TYPE: 10000"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.213, 0.459, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6718", "image": {"bytes": "", "path": "./images/d22ce493-c49c-43f7-939c-4a429797c2a3_ca5e412b-7659-4de8-b48b-d24749857658.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book me a flight from BWI to NYC for 2 for August 2nd-August 7th\nPrevious actions:\n[combobox] Enter your departing city, airport name, or airpor... -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: BWI\n[button] Baltimore, MD, US (BWI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.164, 0.481, 0.185] in the image\nAnd my action is Action: TYPE\nValue: NYC"}]}, {"id": "mind2web_6719", "image": {"bytes": "", "path": "./images/6d87b507-14dd-4903-a131-fa089499ccb5_3fb3d43d-eda2-454f-bd81-4beaabe0e47c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight From Dublin To Athens Greece for 1 Adult that leaves on April 1 and returns April 5th.\nPrevious actions:\n[textbox] To -> CLICK\n[button] Greece -> CLICK\n[button] Athens -> CLICK\n[div] Apr -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.434, 0.281, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6720", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_c7822f35-404a-4681-8945-0b6ac3c36b54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK\n[combobox] In what state are you selling the car? -> SELECT: New Jersey\n[radio] Yes -> CLICK\n[radio] No -> CLICK\n[radio] Yes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.598, 0.798, 0.793, 0.831] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6721", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_52dfe9c5-b379-4ce0-8c66-dd85b7724207.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[span] Tel Aviv -> CLICK\n[textbox] To -> TYPE: VENICE\n[span] Venice Treviso -> CLICK\n[div] May -> CLICK\n[generic] 9 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.299, 0.131, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6722", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_3dafde2b-5c39-47e2-b9b3-0c1e19c6dc3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[link] Search flights one way -> CLICK\n[textbox] From , required. -> CLICK\n[textbox] From , required. -> TYPE: JFK\n[a] JFK - New York John F Kennedy Intl, NY -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.351, 0.492, 0.374] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6723", "image": {"bytes": "", "path": "./images/14b72eb6-48c6-4408-9044-94c3003dccfc_bfa98fe8-342a-4833-b221-f1274a517937.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight for two adults from Shanghai to Seoul on May 10, and return on May 12 also book a hotel with it, with maximum bundled savings and if top hotel not available book the next one.\nPrevious actions:\n[div] Seoul, Republic Of Korea -> CLICK\n[svg] -> CLICK\n[path] -> CLICK\n[checkbox] 10 May 2023 -> CLICK\n[checkbox] 12 May 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.878, 0.238, 0.942, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6724", "image": {"bytes": "", "path": "./images/1c6bfd10-4dcb-4a5c-bb2d-922ff9f20087_86eb051c-670a-49bb-b354-428ae03e2016.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an English language book on roman empire history in the Kindle store released in the last 90 days, it should be displayed according to its latest publication date, and add the top result to the shopping list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.04, 0.054, 0.066] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6725", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_9055b4da-fa8e-445e-ae40-52b8c5e24167.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Next month -> CLICK\n[gridcell] June 18 -> CLICK\n[gridcell] June 21 -> CLICK\n[button] Search packages -> CLICK\n[button] Amenities -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.486, 0.089, 0.494] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6726", "image": {"bytes": "", "path": "./images/02e7bae3-c67f-4227-b6ea-7b87d111202a_474d7869-7905-4b42-90a7-c75117862cbe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show latest documentaries on Netflix and add the one with 70+ metascore.\nPrevious actions:\n[link] What to Watch on Netflix -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.246, 0.077, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6727", "image": {"bytes": "", "path": "./images/26a20a7b-cd98-4752-a9ea-fcaebb3ab56e_6b99a1ca-17aa-452a-9370-27bb2a175812.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information about how to buy metro card on booth.\nPrevious actions:\n[link] Fares & Tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.7, 0.5, 0.713] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6728", "image": {"bytes": "", "path": "./images/4bce534f-3057-40ca-aecd-ffb07b3c9fcb_0ba0e97c-8b51-40e2-9387-368af44c654c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search the actor who won the Oscars for best actor in a supporting role in 1990, and add his upcoming movies to my watchlist.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.757, 0.513, 0.779, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6729", "image": {"bytes": "", "path": "./images/cc174cb2-520d-49c3-93da-f93a1c485c03_5df6fedf-afb3-4095-b06a-c9a3317e485a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Calculate the fare options to go from the south station to the north station\nPrevious actions:\n[button] Fares \uf0d7 -> CLICK\n[link] Fares Overview -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.677, 0.168, 0.845, 0.181] in the image\nAnd my action is Action: TYPE\nValue: south station"}]}, {"id": "mind2web_6730", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_0a45420f-cf51-42a2-82da-24ffd4e8dba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\n[link] Visit the Food & Drinks page -> HOVER\n[link] Order Food & Drinks -> CLICK\n[select] AMC Columbus 10 -> SELECT: AMC Grove City 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.277, 0.637, 0.299] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6731", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_e1e17c9e-26b5-4a08-a7ad-8e999a9870d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[menuitem] Boston, MA, USA -> CLICK\n[textbox] Start Date -> CLICK\n[button] \ue023 -> CLICK\n[gridcell] Tue, May 30, 2023 -> CLICK\n[gridcell] Thu, Jun 1, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.314, 0.943, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6732", "image": {"bytes": "", "path": "./images/6b831239-435b-494e-9aa8-a49e8605d0b3_163e98c4-a7ef-42c2-b151-5cc75d670ca5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What is trending now on AMC on-demand?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.026, 0.633, 0.062] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6733", "image": {"bytes": "", "path": "./images/ac59be41-36c3-44c5-8e1f-ffb9461ba980_321b75fd-1b65-443f-ba11-fcc65a6007b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for all Alternative Concerts taking place in New York City.\nPrevious actions:\n[button] Change Location -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.097, 0.203, 0.231, 0.214] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6734", "image": {"bytes": "", "path": "./images/f804150b-c2e9-40ca-b71f-d0d0d95e0c46_3c33e494-db1f-4561-ac74-d928cd7cca26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest hotel in New York City for 2 adults from Apr 7 to Apr 8.\nPrevious actions:\n[option] New York\u00a0(New York City) New York State,\u00a0United St... -> CLICK\n[checkbox] 7 April 2023 -> CLICK\n[checkbox] 8 April 2023 -> CLICK\n[button] Search -> CLICK\n[menuitem] Price (lowest first) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.295, 0.344, 0.547, 0.354] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6735", "image": {"bytes": "", "path": "./images/efa705c1-f836-4704-b6c8-6e114a2eb865_9fd6dca3-1d20-46bb-814e-0786016ce859.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a service estimate on AC recharge of my 2022 Toyota Corolla, L Sedan 4D. 
My name is James Smith, email is buckeye.foobar@gmail.com, and phone number is 6157075521.\nPrevious actions:\n[link] Car Repair -> HOVER\n[link] Auto Repair Prices -> CLICK\n[combobox] Year -> SELECT: 2022\n[combobox] Make -> SELECT: Toyota\n[combobox] Model -> SELECT: Corolla"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.533, 0.195, 0.712, 0.215] in the image\nAnd my action is Action: SELECT\nValue: L Sedan 4D"}]}, {"id": "mind2web_6736", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_3151a491-e1ce-480a-9cfb-d389ee8624df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK\n[textbox] Things to do in -> TYPE: singapore\n[button] Singapore Singapore -> CLICK\n[button] From April 5, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.308, 0.431, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6737", "image": {"bytes": "", "path": "./images/36b86ab8-a448-46f7-813f-3e3b5c3d687c_1f81f2b6-3fc7-4827-aeee-4a41116ee19d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find offer for a black Honda with vin number 1HGCM66543A064159 with 155000 millage near 49102. 
The car should be in good condition with no damage, 2+ keys and paid off.\nPrevious actions:\n[label] VIN -> CLICK\n[textbox] Enter VIN (17 digits) Where can I find the VIN -> TYPE: 1HGCM66543A064159\n[button] Get your offer -> CLICK\n[textbox] ZIP Code -> TYPE: 49102\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.533, 0.31, 0.553] in the image\nAnd my action is Action: TYPE\nValue: 155000"}]}, {"id": "mind2web_6738", "image": {"bytes": "", "path": "./images/957878ba-cb47-40d1-bf37-23039b2dff27_c219dddc-bdf3-4b52-b770-2f8c34504fc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find a CarMax in Michigan, 49107 and book an appointment for a car repair on April 7th..\nPrevious actions:\n[button] Open helpful links menu -> CLICK\n[link] Service & Repairs -> CLICK\n[link] FIND A SHOP NEAR YOU -> CLICK\n[link] Find a Shop -> CLICK\n[li] 43065 -> TYPE: 49107"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.049, 0.1, 0.294, 0.117] in the image\nAnd my action is Action: TYPE\nValue: 49107"}]}, {"id": "mind2web_6739", "image": {"bytes": "", "path": "./images/988ff1b8-c9e8-44af-a729-33f0e99c9dbf_45697305-9fe5-4695-9ccf-4a73a68552e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show cars with AWD with maximum budget of $50,000.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.059, 0.673, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6740", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_ed9a300a-422a-4ee9-ac12-b6e26509649d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.47, 0.289, 0.52, 0.307] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6741", "image": {"bytes": "", "path": "./images/3eae65d9-7778-4cf2-bc3d-f666b5e43bce_331cc579-44f2-4e80-ace7-f8b7909fd044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Notify me about availability for 2 guests on april 17 at the High Tide restaurant\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.021, 0.418, 0.048] in the image\nAnd my action is Action: TYPE\nValue: high tide"}]}, {"id": "mind2web_6742", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8aeecea7-7ffa-475a-9844-2b49f26b6ce1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.751, 0.278, 0.767] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6743", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_e3c9691e-c9cc-44c9-b2ca-c8f93c388a5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.805, 0.787, 0.824] in the image\nAnd my action is Action: TYPE\nValue: James"}]}, {"id": "mind2web_6744", "image": {"bytes": "", "path": "./images/2e133e56-ac17-41dc-987a-257078d770c3_7d42e63b-49be-41b1-a453-28707cb28367.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find on the water activities in Dubai from 5pm to 12am with a maximum duration of four hours\nPrevious actions:\n[textbox] Where to? 
-> TYPE: Dubai"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.23, 0.729, 0.263] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6745", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_21334cc2-269f-4dd5-898b-f2cab62a8b19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK\n[link] Students & Grads -> CLICK\n[link] INTERNSHIP PROGRAMS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.517, 0.168, 0.813, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6746", "image": {"bytes": "", "path": "./images/a2959cdb-fbc4-435f-ba89-85d50d22298c_c94f5e1a-4d22-4c21-90b6-164dcee297dc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking spaces in JFK airport between April 18-20 that have lowest shuttle times\nPrevious actions:\n[tab] Airport -> CLICK\n[searchbox] Airport Name, Code or City -> TYPE: jfk"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.169, 0.914, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6747", "image": {"bytes": "", "path": "./images/ddd935d4-9c28-4e64-bb4f-91ad1cb974dc_212a36a7-c358-4fd7-9122-8c6721b7ed7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See in the Men's sale if any L-sized t-shirt in blue color is available or not, if available, add one to the cart choose pick in store for zip 10005.\nPrevious actions:\n[link] MEN -> HOVER\n[link] Sale -> CLICK\n[heading] Category -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.436, 0.48, 0.47] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6748", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_b80dae22-0311-4f5c-9aed-76f14574703d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Michael Jordan\n[textbox] Search -> ENTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.158, 0.207, 0.198] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6749", "image": {"bytes": "", "path": "./images/f464de6d-9923-4e8e-8046-56751a90a9a8_d48017ee-76d2-4d21-8bd8-112aa9cb8bc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track a bus leaving from Abbotsford.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.443, 0.0, 0.559, 0.054] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6750", "image": {"bytes": "", "path": "./images/b1eb6602-3aa7-40ee-8968-69d83e7ef21c_f626fb4a-bfe2-443e-aaaf-663f8eae8055.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the newest silver-colored Lexus SUV with a black interior and backup camera to purchase which has been made between 2022-2023 and is within 50 miles of zip 10017.\nPrevious actions:\n[menuitem] New Cars -> CLICK\n[p] SUV / Crossover -> CLICK\n[textbox] ZIP -> TYPE: 10017\n[button] Search -> CLICK\n[combobox] Select Minimum Year -> SELECT: 2022"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.137, 0.235, 0.156] in the image\nAnd my action is Action: SELECT\nValue: 2023"}]}, {"id": "mind2web_6751", "image": {"bytes": "", "path": "./images/44dcda68-082c-455a-a409-7091470cc006_e964b7d2-296b-49f0-9a08-4813d10b5a46.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Edinburgh Waverley to Glasgow Central on May 18, leaving nearest 3PM for 1 adult with a Veterans Railcard\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: Edinburgh\n[span] Edinburgh (Waverley) -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: Glasgow\n[span] Glasgow Central -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.132, 0.194, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6752", "image": {"bytes": "", "path": "./images/a9708ad7-bec8-4435-b055-8ef6da520b8c_df64899f-f2f6-4a81-99ec-a0029e9790a8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the top track for the top indie artist in the last 30 days\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.008, 0.791, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6753", "image": {"bytes": "", "path": "./images/2e2cb6d9-560c-4258-baed-82e0ed5b5dbc_e6bb3bac-2055-4e4d-b429-cf5310a8955a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find bus stop information at Abbotsford, WI.\nPrevious actions:\n[button] Explore\ue911 -> CLICK\n[link] Bus stops -> CLICK\n[button] Abbotsford, WI \ue911 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.321, 0.13, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6754", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_78d05d85-15c8-4638-b44f-b3bcdade5119.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Rock -> CLICK\n[img] David Bowie -> CLICK\n[link] David Bowie -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.409, 0.227, 0.419, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6755", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_c31831cf-fcb1-4fc6-a696-aa5540372aa1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[textbox] Pick-up location -> CLICK\n[textbox] Pick-up location -> TYPE: Brooklyn\n[span] Brooklyn, New York, United States -> CLICK\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.762, 0.097, 0.877, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6756", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_03b9d01e-5454-4f71-88b8-20e5c41872f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK\n[button] Add to Wish List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.22, 0.716, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6757", "image": {"bytes": "", "path": "./images/59bdd427-c34b-4b93-a8b8-0c3a49acdcfa_1967f971-1242-4c07-8421-62e434f90fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a round-trip flight from Santa Fe, New Mexico to Phoenix, Arizona for 1 adult on August 13th and returning on August 28th.\nPrevious actions:\n[button] Phoenix, AZ, US (PHX) -> CLICK\n[textbox] Depart -> CLICK\n[button] Move forward to switch to the next month. -> CLICK\n[span] 13 -> CLICK\n[span] 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.284, 0.479, 0.478, 0.516] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6758", "image": {"bytes": "", "path": "./images/c50985ee-d4d0-4a9e-ac6f-97bede200abd_700030bb-f584-4e85-91bd-357f444c6051.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check the lowest price ticket including fees to the next M3 Rock Festival - 2 Day Pass\nPrevious actions:\n[link] Music -> HOVER\n[link] Festivals -> HOVER\n[link] M3 Rock Festival -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.358, 0.553, 0.384] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6759", "image": {"bytes": "", "path": "./images/69661946-6c3e-4e26-95ed-1f7641a31f22_d9c1648c-f508-47c9-83d1-5649ca2da7df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse listings for a used Toyota Corolla near 10019 and sort by cheapest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.332, 0.135, 0.432, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6760", "image": {"bytes": "", "path": "./images/4775a0e3-3dad-46f2-b719-b1c0dae9e147_a702b9eb-d196-46e6-b587-372a8c3c648a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tortillas bread in grocery stock near zip 59901 and add 5 pieces of 12 pcs pack in the cart and view the cart.\nPrevious actions:\n[button] MAKE MY STORE -> CLICK\n[button] Departments -> HOVER\n[button] Grocery & Home -> HOVER\n[link] Grocery -> CLICK\n[link] Bread -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.134, 0.122, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6761", "image": {"bytes": "", "path": "./images/60383804-a8e5-4e50-8715-da391d76617d_b48691e5-ea8b-45b3-8bef-d5389d03b4a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse candle holder red decor products.\nPrevious actions:\n[link] Marketplace -> CLICK\n[link] Shop by Color -> CLICK\n[link] Shop red -> CLICK\n[button] Show filter modal Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.375, 0.135, 0.563, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6762", "image": {"bytes": "", "path": "./images/c987dd1d-9ade-4701-8cdb-a31f8f3bf673_6f943877-135c-4690-bc5b-ee941b3a8565.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a best rated Thai restaurant in Westminster, California, that provides takeout, and whose staff is fully vaccinated and accepts Apple pay.\nPrevious actions:\n[button] See all -> CLICK\n[checkbox] All staff fully vaccinated -> CLICK\n[checkbox] Accepts Apple Pay -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.518, 0.137, 0.612, 0.157] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6763", "image": {"bytes": "", "path": "./images/920f240d-77a9-476b-a1d4-dcc88d199bfa_5c1e2823-28b4-4884-9036-1d917f7a70e2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find rock music events in Canada, happening this weekend and save top three popular events for my record.\nPrevious actions:\n[link] Music -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.246, 0.247, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6764", "image": {"bytes": "", "path": "./images/4947d606-626e-4da3-a595-bfedacbed3ec_ca273023-d776-42ee-b189-656af8e4a2f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request ticket receipt for ticket no. 
1234567890123 and last name Davis\nPrevious actions:\n[link] Receipts and refunds -> CLICK\n[link] Receipts for tickets and fees -> CLICK\n[textbox] Ticket number -> CLICK\n[textbox] Ticket number -> TYPE: 1234567890123"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.24, 0.656, 0.293] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6765", "image": {"bytes": "", "path": "./images/d29e8a14-ee66-4330-b282-09cb1955aad0_98f2a61d-1e65-44d2-b21b-8856adfb16c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the weekly ad in List View.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.347, 0.022, 0.497, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6766", "image": {"bytes": "", "path": "./images/009cc066-87b2-4ebb-ad8f-41eeb3d94be6_d9a6085d-eed6-4737-bec7-50f8e2953d86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a parking with EV charging for month with lowest price in Chicago.\nPrevious actions:\n[li] Chicago, IL, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] EV Charging (10) -> CLICK\n[button] Show 10 Results -> CLICK\n[select] Sort by Distance -> SELECT: Sort by Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.299, 0.402, 0.372, 0.422] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6767", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_1ad4f2f9-ba55-482c-bda8-b879589bdb54.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK\n[checkbox] 19 inch (7) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.795, 0.096, 0.99, 0.111] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6768", "image": {"bytes": "", "path": "./images/31c12c59-d9b8-4d9a-8456-9f1107d0e5f8_8c67fc5e-0ba2-4649-b99f-249f1310f9c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the reservation made by James Smith in the United States with Confirmation Number 123456\nPrevious actions:\n[button] Reservations -> CLICK\n[link] View / Modify / Cancel -> CLICK\n[combobox] Select Residency -> SELECT: UNITED STATES\n[input] -> TYPE: smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.581, 0.421, 0.902, 0.469] in the image\nAnd my action is Action: TYPE\nValue: 123456"}]}, {"id": "mind2web_6769", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_2f0192ff-5e1a-44a6-8649-bfff77330b42.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[link] repeat Trade-In -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.258, 0.352, 0.435, 0.505] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6770", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_062d3d9a-c1d4-47c7-8192-39cfc6fcf0a6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\n[button] Travel Info -> CLICK\n[link] Flight Tracker -> CLICK\n[textbox] From -> TYPE: Los Angeles\n[option] Los Angeles area -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.401, 0.219, 0.601, 0.267] in the image\nAnd my action is Action: TYPE\nValue: Boston"}]}, {"id": "mind2web_6771", "image": {"bytes": "", "path": "./images/3f0988e0-e3a4-4dd7-a89f-482175a474a8_c1406787-2858-4cfd-9ff0-18dd822ec56a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Wheelchair accessible parking for April 09 event in Barclays Center that allows entry after 10am\nPrevious actions:\n[textbox] Search for parking -> TYPE: Barclays Center\n[li] Barclays Center, Atlantic Avenue, Brooklyn, NY, US... 
-> CLICK\n[textbox] Select Event Date -> CLICK\n[gridcell] Sun Apr 09 2023 -> CLICK\n[p] Philadelphia 76ers at Brooklyn Nets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.319, 0.33, 0.384, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6772", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_d81b3416-82ef-4ec6-b938-3da5c2548270.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Search -> CLICK\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.194, 0.716, 0.212] in the image\nAnd my action is Action: TYPE\nValue: Seattle, WA"}]}, {"id": "mind2web_6773", "image": {"bytes": "", "path": "./images/e0ff7945-56b0-487a-920b-cbf954e6fb72_da89b808-f145-4860-a45f-450834fba4e4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Reserve a dinner table in the first restaurent in the results, any time after 8 pm tomorrow for two in New York in any available Indian cuisine-serving restaurant.\nPrevious actions:\n[div] Columbus, OH -> CLICK\n[button] New York -> CLICK\n[textbox] Search restaurants, cuisines, etc. -> TYPE: indian"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.179, 0.063, 0.228, 0.079] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6774", "image": {"bytes": "", "path": "./images/a919aa15-2acf-4090-8468-1d3a0d6450b1_b2e7c3b2-0fa3-49bc-8478-4cea20de1dc4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Need the cheapest small car on April 7, 10 am at Auckland airport for pick-up, return will be on April 10 at approx 5 pm.\nPrevious actions:\n[link] 7 -> CLICK\n[combobox] Pick Up Time -> SELECT: 10:00 AM\n[textbox] mm/dd/yyyy -> CLICK\n[link] 10 -> CLICK\n[combobox] Return Time -> SELECT: 5:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.78, 0.381, 0.891, 0.414] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6775", "image": {"bytes": "", "path": "./images/d64cbfc6-7ace-4cad-9f0a-6643cd83de6e_e238e976-5417-483a-bff6-e54699142179.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest full-sized Amazon basic comforter with 4+ average customer reviews and priced between 50 to 100 dollars, if red color availabe in the cheapest product then add one to the cart.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: COMFORTER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.651, 0.007, 0.686, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6776", "image": {"bytes": "", "path": "./images/4fe76361-f0e3-458f-a3d5-453da3c7cecc_ccf4155a-d9ed-4ede-a1a6-010e16f61ea7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List of Broadway events sorted by date.\nPrevious actions:\n[link] broadway. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.694, 0.221, 0.894, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6777", "image": {"bytes": "", "path": "./images/d63b1715-688c-4be2-b196-dde9659bc59d_1a23c4b3-11dc-419a-a0c3-e4d328690204.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: From Birmingham (BHX) to Paris search for packages with casinos, restaurant, fitness and a free internet from April 7th to 11th.\nPrevious actions:\n[link] Search for packages -> CLICK\n[textbox] From -> TYPE: Birmingham\n[span] Birmingham, England, United Kingdom -> CLICK\n[textbox] Enter destination or airport -> TYPE: Paris\n[span] City -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.591, 0.141, 0.76, 0.175] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6778", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_745f0ada-af2d-4846-ae61-94bc84783005.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Shop Hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.248, 0.571, 0.286] in the image\nAnd my action is Action: TYPE\nValue: New Delhi"}]}, {"id": "mind2web_6779", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_309508ef-3fbc-4301-898a-906c004937a7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\n[combobox] Search for anything -> TYPE: trash can automatic lid\n[option] automatic trash can lid -> CLICK\n[textbox] Maximum Value in $ -> TYPE: 60"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.224, 0.176, 0.238] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6780", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_bdc9a239-d845-44f2-ac6d-a78a43ce85a5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[img] humangear GoBites Uno Spork 0 -> CLICK\n[button] Color: Magenta -> CLICK\n[span] -> CLICK\n[button] Change store -> CLICK\n[searchbox] Search by location -> TYPE: Seattle, WA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.691, 0.24, 0.709, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6781", "image": {"bytes": "", "path": "./images/4f75897b-8df4-4f4c-88c3-72d440f6a4ee_0dbe031f-7330-4084-81bd-d133f5f5014a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the destinations in Armenia, Austria, Belgium and Bulgaria in Alphabetical order\nPrevious actions:\n[button] Plan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.215, 0.154, 0.313, 0.163] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6782", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_97649533-7183-42a1-ae1f-275a69e171b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK\n[button] More ways to shop -> CLICK\n[link] Send gifts -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.058, 0.26, 0.178, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6783", "image": {"bytes": "", "path": "./images/0c577209-47dc-4645-8d10-0b659663a969_2e7bca75-da1d-4ae9-a4e3-63c8a0469fdd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find the score of the latest nba game played by the phoenix suns.\nPrevious actions:\n[combobox] Search query -> TYPE: phoenix suns\n[link] Phoenix Phoenix NBA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.746, 0.319, 0.976, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6784", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_14510cda-06ca-4191-bea4-39e0e54bb281.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[div] Bridges and Tunnels tolls -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.133, 0.239, 0.367, 0.335] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6785", "image": {"bytes": "", "path": "./images/274571ea-fc2f-4353-86ba-00ecb112d6d2_1f5cb659-0c88-4f0e-a389-97e9e90a0893.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find multi-colored fillable eggs for easter eggs made from polypropylene priced between 5 to 10 dollars for pick-up from Barboursville, zip 25504.\nPrevious actions:\n[span] Charleston -> CLICK\n[textbox] zip or city, state -> TYPE: 25504"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.201, 0.081, 0.281, 0.11] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6786", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_51c840cc-adac-4cc2-a914-b07bcef81959.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.298, 0.956, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6787", "image": {"bytes": "", "path": "./images/706d0ccd-c0ec-423d-88c0-a5716700a855_04e9cc0d-00d8-48a5-b493-a9b27a1aa465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for used BMW X5 Crossovers and compare the mileage of the first two cars.\nPrevious actions:\n[link] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.259, 0.397, 0.353, 0.451] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6788", "image": {"bytes": "", "path": "./images/e62bcf45-0be3-4195-b234-37755d6d715e_4c487c29-f51f-47a4-b521-08bb8e2c8253.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book by Sam Harris that has a rating of at least 4 stars and is available in paperback format.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: Sam Harris\n[button] sam harris -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.326, 0.219, 0.339] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6789", "image": {"bytes": "", "path": "./images/51e85ea8-7a75-40f3-8f35-ed255f87171f_6484f6b2-1c3b-438a-b60d-739032df779a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to cart 2 bottles of vitamin D that are buy 1 get 1 free and new arrival.\nPrevious actions:\n[button] Shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.268, 0.234, 0.574, 0.262] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6790", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_dacb180b-b588-4ff6-982a-c9294c11bddb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.0, 0.605, 0.038] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6791", "image": {"bytes": "", "path": "./images/9a9b1b45-cb23-4073-897f-67410087df9d_ddeea3d8-a96c-4584-8dae-084e4a76aaae.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule COVID-19 test appointment at 90028.\nPrevious actions:\n[span] COVID-19 booster, testing, treatment & records -> CLICK\n[link] COVID-19 testing Schedule a COVID-19 test -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.633, 0.308, 0.844, 0.335] in the image\nAnd my action is Action: TYPE\nValue: 90028"}]}, {"id": "mind2web_6792", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_430ba357-5e86-4d8c-a1a0-66fe657b0197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK\n[link] Scores -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.698, 0.199, 0.717, 0.214] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6793", "image": {"bytes": "", "path": "./images/2742f638-cf66-4c72-a6a6-69f2a12bc269_b4b0eb8a-8008-4e51-8b11-0f2dc0fb6013.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight on May 9 and return on May 16 from Tel Aviv to Venice with plus type fare option.\nPrevious actions:\n[generic] 9 -> CLICK\n[generic] 16 -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Select -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.512, 0.969, 0.702, 1.017] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6794", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_3e0071a1-0e7a-4dcc-afaf-8d49bc8ba14b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK\n[link] Tablets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.378, 0.389, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6795", "image": {"bytes": "", "path": "./images/1282a011-194c-40c4-8f76-875e502cdd53_45555477-fab7-4449-b1ff-66433e3230ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse laptops with at least 16gb of ram and under $700.\nPrevious actions:\n[searchbox] Search Site -> TYPE: laptop\n[button] \uf002 -> CLICK\n[textbox] price to -> TYPE: 700\n[button] APPLY -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.267, 0.192, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6796", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_5dcf5117-8341-4ea2-a6eb-a516c41a71b8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK\n[img] Men's UA Tech\u2122 2.0 Short Sleeve -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.867, 0.375, 0.924, 0.393] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6797", "image": {"bytes": "", "path": "./images/549a9805-e404-4dcf-8c72-ddf46471114b_614535fe-72dd-44fb-bb5e-97d4a2547a1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare two wireless printers that are rated above 4 stars.\nPrevious actions:\n[searchbox] Search Site -> TYPE: wireless printer\n[button] \uf002 -> CLICK\n[dt] Customer Ratings -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.279, 0.192, 0.29] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6798", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_fc871d4d-aa26-42a3-a595-7ec4b6c676ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.28, 0.148, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6799", "image": {"bytes": "", "path": "./images/8e200503-9b49-48aa-aaf6-3d6692b5cb50_e043291f-ff92-42c8-a9d2-016c8f3469f0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for locksmiths near Texas City, Texas that provide virtual consultations and are open now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.158, 0.023, 0.243, 0.042] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6800", "image": {"bytes": "", "path": "./images/61d28a34-cc8e-46a9-b41d-c95933de2214_21f4dc7e-c394-4957-8985-4f8889128c41.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all time popular drop c rock tab of beginner difficulty\nPrevious actions:\n[link] Tabs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.023, 0.214, 0.153, 0.227] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6801", "image": {"bytes": "", "path": "./images/c9f65ae8-bd67-4a83-82d7-d42587e34e98_f5d1ec4a-c8d9-4910-9f1e-577da891b353.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my parking space in Houston. Name: James Smith, Email: buckeye.foobar@gmail.com, phone: 8888888888. 
Address: 123rd st.\nPrevious actions:\n[link] Rent My Parking Space -> CLICK\n[textbox] First Name * -> TYPE: James\n[textbox] Last Name * -> TYPE: Smith\n[textbox] Email * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.539, 0.787, 0.581] in the image\nAnd my action is Action: TYPE\nValue: 8888888888"}]}, {"id": "mind2web_6802", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_62891d67-0dd1-4e76-86cf-cb8ae1c0e520.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.082, 0.713, 0.093] in the image\nAnd my action is Action: TYPE\nValue: Athens, Attica, Greeece"}]}, {"id": "mind2web_6803", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_8c84c4cf-69a8-4598-aae3-828c35e95aa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.027, 0.673, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6804", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_e8f30da3-c814-4d47-8040-6e285a960ae4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.602, 0.611, 0.755, 0.625] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6805", "image": {"bytes": "", "path": "./images/ae969e05-d10e-4255-99f7-c27e071fad69_3e7af91f-3368-46f6-9c85-0c5de6736fa6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the standings for the English Premier League.\nPrevious actions:\n[div] \u2026 -> CLICK\n[link] Soccer . -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.306, 0.103, 0.391, 0.113] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6806", "image": {"bytes": "", "path": "./images/e2f8b054-59b5-4fd8-bd58-9f95b29f979d_7e4e90e4-e0f9-4262-878b-221a78155dee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest 2018 Honda Civic for sale within 100 miles of 07055\nPrevious actions:\n[textbox] ZIP Code -> TYPE: 07055\n[combobox] Distance -> SELECT: 100 Miles\n[combobox] Minimum Year -> SELECT: 2018\n[combobox] Maximum Year -> SELECT: 2018\n[button] Make \ue920 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.872, 0.277, 0.883] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6807", "image": {"bytes": "", "path": "./images/2a831fb6-3110-4ffb-8687-1d2acab09873_677f0c0a-d900-4ca9-8c5e-73fd4036a379.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Locate a park that has a campground and is located in the state of Colorado.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[link] Search by Activity or Topic -> CLICK\n[combobox] By State -> SELECT: Colorado"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.164, 0.641, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6808", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_c35de92f-4b70-42f2-827e-95ba59506320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[textbox] Enter ZIP or State -> TYPE: 07470\n[button] set store -> CLICK\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.202, 0.317, 0.255, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6809", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_9ae02909-fe47-4383-8a72-7194c0f533cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\n[heading] CAR -> CLICK\n[label] Book with miles -> CLICK\n[LabelText] Enter pick up city, airport name, or airport code. -> TYPE: caldwell\n[div] Caldwell -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.654, 0.158, 0.679] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6810", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_6ad45996-e569-422b-8e82-f5d261f319d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.369, 0.783, 0.402] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6811", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_fbca318e-9c4e-417a-9f36-e39c77345c0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine tasting for 4 guests on April 15, 10 am in an outdoor setup.\nPrevious actions:\n[searchbox] Find a location -> TYPE: NAPA VALLEY\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.528, 0.354, 0.573, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6812", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_8acab6b2-6b28-45c9-84b2-0c56d4964684.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: New York\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.163, 0.781, 0.194, 0.848] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6813", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_62479223-7350-45b8-a272-43a71a83db44.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.61, 0.867, 0.751] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6814", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_de99fc69-4313-49d0-9740-e0fabd61bc14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.783, 0.214, 0.843, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6815", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_a09ec185-740a-4090-85e2-0bb866a277d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[div] , Greece -> CLICK\n[div] Choose date -> CLICK\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.733, 0.191, 0.953, 0.24] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6816", "image": {"bytes": "", "path": "./images/7eff6c15-bab6-4ce4-abb8-72619f593ea1_c8297402-4dc8-4983-9ec1-6ee82f468ab6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Track my bus with service number SE4.\nPrevious actions:\n[button] Check my bus\ue911 -> CLICK\n[link] Track my bus -> CLICK\n[tab] Service Number -> CLICK\n[combobox] Search by Service Number -> TYPE: SE4\n[span] Columbia, SC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.723, 0.212, 0.98, 0.255] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6817", "image": {"bytes": "", "path": "./images/b1a1f767-8611-4539-9c08-475011d38e12_3703e141-87f5-412d-9675-2eaf6c10dbea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news about Mikal Bridges\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.06, 0.966, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6818", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_c45c6c0c-d446-41da-99c0-cc44abed21eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK\n[button] Recommended -> CLICK\n[span] Most Reviewed -> CLICK\n[button] Verified License -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.287, 0.626, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6819", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_1a954a07-0ffd-4322-a31d-b66f330025eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\n[button] DESTINATIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.554, 0.122, 0.922, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6820", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_d4bf4941-facb-40a5-844f-31f00302fd71.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK\n[button] Ireland -> CLICK\n[span] Dublin -> CLICK\n[button] France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.523, 0.164, 0.715, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6821", "image": {"bytes": "", "path": "./images/85706225-aa42-43c1-8cfa-8696118698a2_a5c25f8f-2e98-4c30-bbdc-a56abaee49bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of flights from Los Angeles area to Boston area tomorrow\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.491, 0.026, 0.578, 0.059] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6822", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_afe07d47-565f-4f9c-be14-366c5a45b1c3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[heading] Color -> CLICK\n[span] BLACK -> CLICK\n[svg] -> CLICK\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.463, 0.51, 0.482, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6823", "image": {"bytes": "", "path": "./images/33964bc4-fc1d-417c-ba5a-2c1c45ed8d8b_96597d17-6899-4300-8c45-7ae8387e89dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the most expensive things to do anywhere in December in a map\nPrevious actions:\n[tab] Select a Month -> CLICK\n[li] December -> CLICK\n[button] Filter -> CLICK\n[button] $ Price: High to Low -> CLICK\n[button] DONE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.114, 0.195, 0.128] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6824", "image": {"bytes": "", "path": "./images/00e83fae-f2f0-48ab-912c-32c03982642b_dfbd7cb5-d7b8-4500-b831-e7be9b8494eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show computer game reviews sorted by score.\nPrevious actions:\n[button] Reviews -> CLICK\n[link] Game Reviews -> CLICK\n[combobox] Platform -> SELECT: PC"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.166, 0.178, 0.291, 0.202] in the image\nAnd my action is Action: SELECT\nValue: Sort by Score"}]}, {"id": "mind2web_6825", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_5b8da6f5-c53c-4b69-bfad-7bdfd2e6ce20.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[textbox] Search by Airport, City, Zip, Address or Attractio... 
-> TYPE: laguardia airport\n[span] LaGuardia Airport -> CLICK\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.136, 0.5, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6826", "image": {"bytes": "", "path": "./images/56d210ec-22eb-4b3f-ba76-ee531403701a_f52cc040-3159-499d-8b13-ea4613b23b63.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the \"Meetings & Events\" section of the website to browse locations for a business meeting for one room and 20 guests in Boston from May 30th to June 1st.\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Book An Event Book a Meeting or Event -> CLICK\n[textbox] Destination -> TYPE: Boston"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.245, 0.335, 0.277] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6827", "image": {"bytes": "", "path": "./images/e5c228ff-7834-46d2-b906-dcea21a0c477_52e1bc08-aa96-46ba-aa6d-d0191b7a51a2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the subway station nearest to 07055\nPrevious actions:\n[span] Nearby Stations & Stops -> CLICK\n[searchbox] Address, station, landmark -> CLICK\n[searchbox] Address, station, landmark -> TYPE: 07055"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.235, 0.366, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6828", "image": {"bytes": "", "path": "./images/8df744ef-83ee-4472-9fb0-25bd9cf9dcd9_b060d99c-865a-4016-b147-d497c82a20db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a ten-ride ticket from Washington to New york starting from April 5\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.077, 0.203, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6829", "image": {"bytes": "", "path": "./images/be5e5f14-c875-4cfd-a517-175619491b90_bea83466-1716-4e8b-81aa-7df1c9d2586d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the NFL Scoreboard from the superbowl 2015\nPrevious actions:\n[link] NFL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.09, 0.206, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6830", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_8676f7b9-73e2-4bf3-b8e6-d38576f3f87f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.356, 0.065, 0.441, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6831", "image": {"bytes": "", "path": "./images/8d0eda4b-7a6e-45a4-bd8c-59fd3da58676_02680f4d-0ad2-4cac-b260-f95bda93cf34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a one-way flight ticket from Addis Ababa to Accra on April 14 and book the cheaper economy ticket.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: addis ababa\n[strong] Addis Ababa -> CLICK\n[textbox] To Autocomplete selection. Enter your place. -> TYPE: accra\n[strong] Accra -> CLICK\n[option] One way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.745, 0.252, 0.795, 0.282] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6832", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_1b86bf52-d450-49a1-b6e9-54aac49ff1cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.151, 0.312, 0.165] in the image\nAnd my action is Action: TYPE\nValue: Houston"}]}, {"id": "mind2web_6833", "image": {"bytes": "", "path": "./images/160fc162-7f03-4f59-83e1-5502d00806f2_edb1d676-2d90-478f-a19b-c083f267b082.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what is playing on Showtime sorted by newest.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.614, 0.126, 0.668, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6834", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_096ee7c8-8209-4457-9239-6737dd54c324.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.135, 0.104, 0.147] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6835", "image": {"bytes": "", "path": "./images/6ab8130f-362c-4fc3-bdab-d0528f0c6181_043449b3-6d42-43f7-844d-b5855db096f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest 3-star hotel or resort for two within a budget of 50 to 100 dollars for four days starting from April 25, near the beach in goa that offers breakfast, wifi, a swimming pool, and free cancellation.\nPrevious actions:\n[checkbox] 25 April 2023 -> CLICK\n[checkbox] 29 April 2023 -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.333, 0.263, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6836", "image": {"bytes": "", "path": "./images/db203a3a-8e62-41aa-ac05-717a8c11508d_5e5cfcd1-5a2e-4b97-9a6c-60e242291757.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Brooklyn, NY to Staten Island, NY on March 25th mainly using bus and subway terminal. The trip should arrive at 9:45 AM.\nPrevious actions:\n[searchbox] From -> TYPE: brooklyn\n[listitem] Brooklyn, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.141, 0.313, 0.359, 0.343] in the image\nAnd my action is Action: TYPE\nValue: staten island"}]}, {"id": "mind2web_6837", "image": {"bytes": "", "path": "./images/26fff471-7083-46e4-945e-d1b167157a0d_411cc346-144b-4861-ba9e-c9395f8f0598.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check refund status for John Davis for ticket no. 1234567890123\nPrevious actions:\n[textbox] Ticket number -> TYPE: 1234567890123\n[textbox] First name -> CLICK\n[textbox] First name -> TYPE: John\n[textbox] Last name -> CLICK\n[textbox] Last name -> TYPE: Davis"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.417, 0.895, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6838", "image": {"bytes": "", "path": "./images/a338a731-596c-456e-9a9f-25f9e284309e_1ce7ca32-49ee-4274-abe9-5294b2487601.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Make a playlist and add 3 songs from popular Selena Gomez tracks and name it Love.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.321, 0.005, 0.561, 0.023] in the image\nAnd my action is Action: TYPE\nValue: Selena Gomez"}]}, {"id": "mind2web_6839", "image": {"bytes": "", "path": "./images/6a326478-2a1b-4e47-b298-53f3ac12ed51_e46c7544-0dea-4eee-8a35-8253034883a4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with information about luggage and what to bring.\nPrevious actions:\n[button] Plan -> HOVER\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.059, 0.202, 0.262, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6840", "image": {"bytes": "", "path": "./images/78915162-53c9-4cb1-86e1-6be6047528e0_e1f6fdb8-4efe-45dc-90d8-624bdd5a4e2f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out if Coldplay is playing any events in Columbus, OH on May 17.\nPrevious actions:\n[textbox] Search by team, artist, event or venue -> TYPE: Coldplay\n[paragraph] Coldplay -> CLICK\n[button] Filter by\u00a0Date -> CLICK\n[button] Next month -> CLICK\n[div] 17 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.578, 0.357, 0.645, 0.377] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6841", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_41227868-8f44-46fc-9ee1-31604f7f4dbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[textbox] Last name -> TYPE: Bloggs\n[button] Continue -> CLICK\n[button] Continue -> CLICK\n[button] Okay, got it. -> CLICK\n[img] undefined -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.784, 0.423, 0.953, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6842", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_c523456c-2910-4d24-a99f-3ca35aa410c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] Premium -> CLICK\n[svg] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.254, 0.331, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6843", "image": {"bytes": "", "path": "./images/0b70e49b-2759-4276-ad4f-471e405544b9_c2d31bb2-2ab9-4d3f-b785-17fddb4b85f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View all conductor trainee job openings in Amtrak New York and apply to the latest job.\nPrevious actions:\n[textbox] Search by Keyword -> TYPE: Conductor\n[textbox] Search by Location -> TYPE: New Yok\n[button] To make this website accessible to screen reader, ... -> CLICK\n[button] To make this website accessible to screen reader, ... -> CLICK\n[link] To make this website accessible to screen reader, ... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.777, 0.241, 0.879, 0.266] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6844", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_e2bca4e2-c8ba-4505-bb4a-2c11560be18b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New York leaving on april 9 and returning on april 19\nPrevious actions:\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 19 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.43, 0.875, 0.468] in the image\nAnd my action is Action: SELECT\nValue: First"}]}, {"id": "mind2web_6845", "image": {"bytes": "", "path": "./images/66d12284-5525-42d2-a66f-fe5c460b7764_184d3ce5-9bab-402e-bf38-9d7a0072c5ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order popcorn at AMC Grove City 14\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.421, 0.026, 0.524, 0.06] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6846", "image": {"bytes": "", "path": "./images/42f64db7-c573-4ba8-9cab-a390e2f5e535_951b37fc-526a-4203-93fe-e65dfab59126.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight from Mumbai to New York City on 21st April, 2023.\nPrevious actions:\n[link] From Departure Airport or City Your Origin -> TYPE: Mumbai\n[link] BOM Mumbai, India -> CLICK\n[link] To Destination Airport or City Your Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.255, 0.103, 0.515, 0.125] in the image\nAnd my action is Action: TYPE\nValue: New York City"}]}, {"id": "mind2web_6847", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_8aabcd7c-a3ec-4ba4-83c7-c61f37de5cea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Antiques -> CLICK\n[link] Furniture -> CLICK\n[link] Chairs -> CLICK\n[button] Sort: Best Match -> CLICK\n[link] Time: newly listed -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.63, 0.17, 0.713, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6848", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_ec93c9c4-0c2e-4576-84b5-0f558804edc3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags St. Louis -> CLICK\n[button] Go! -> CLICK\n[link] Plan Your Visit \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.749, 0.245, 0.894, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6849", "image": {"bytes": "", "path": "./images/b48c9974-4ba0-4112-98ce-3667781fa71b_507874fe-1115-4387-ae3a-678440621c58.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest standard train ticket from London to Sheffield on March, 29, arriving by 10:45 am for 2 adults.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.071, 0.327, 0.095] in the image\nAnd my action is Action: TYPE\nValue: london"}]}, {"id": "mind2web_6850", "image": {"bytes": "", "path": "./images/581da9fe-4d75-42a7-b138-9b287a153575_5382b192-80ec-4d29-8cfe-cea3aa9af99b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a pair of mens running shoes in black, size 7 extra wide, 4+ stars and under $50 and add them to my cart\nPrevious actions:\n[textbox] Search Amazon -> TYPE: mens black running shoes\n[button] Go -> CLICK\n[region] 4 Stars & Up -> CLICK\n[link] Under Armour Men's Charged Assert 9 Running Shoe -> CLICK\n[button] 7 X-Wide -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.729, 0.483, 0.869, 0.502] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6851", "image": {"bytes": "", "path": "./images/efe9051b-d626-443c-b8c8-e7844a0acd29_4c53006a-8253-499c-9e1f-0abe87119311.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find popular tracks from the rock band Nirvana.\nPrevious actions:\n[searchbox] Search -> TYPE: Nirvana"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.308, 0.02, 0.567, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6852", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_91b89cbd-5da5-4edf-a302-06a3338116a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo\n[strong] Colombo -> CLICK\n[textbox] To 2180 results are available, use up and down arr... -> TYPE: new york\n[strong] New York -> CLICK\n[textbox] Depart date please enter date in the format dd spa... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.803, 0.445, 0.851, 0.469] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6853", "image": {"bytes": "", "path": "./images/754c8c29-92b6-4af1-9594-f9d3da37b951_e4ab6d47-920a-4d58-98a1-6ae5bf1c6cab.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 3-star rated hostel with balcony and free cancellation option in Udipi, Karnataka for one person under 50 dollars from June 15 to 20, free wifi is compulsory.\nPrevious actions:\n[link] Hostels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.073, 0.274, 0.41, 0.304] in the image\nAnd my action is Action: TYPE\nValue: udupi"}]}, {"id": "mind2web_6854", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_850cc85e-d691-4b91-ac4c-1212d64d2b5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[span] -> CLICK\n[button] Country -> CLICK\n[span] -> CLICK\n[button] State / Province -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.268, 0.245, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6855", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_516114e2-6390-41c7-b809-44aea3dfef43.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Nike -> CLICK\n[button] US Shoe Size -> CLICK\n[link] 9 -> CLICK\n[button] Condition -> CLICK\n[link] Pre-owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.834, 0.212, 0.923, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6856", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_67a25df3-31cd-4a0c-88ca-4468b63ad958.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. -> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK\n[textbox] Sun Mar 26 \uf073 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.314, 0.259, 0.343, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6857", "image": {"bytes": "", "path": "./images/373dec75-7278-4d12-bcc3-0e802e337c4a_dc54e8cf-9e8b-4094-a90c-2230a4eedbff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for Tree stands with at least 3 stars, and sort the results by price from low to high.\nPrevious actions:\n[link] Hunting -> CLICK\n[menuitem] Climbing Stands -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.444, 0.087, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6858", "image": {"bytes": "", "path": "./images/0633c328-f1ad-42bd-8aec-7a1883b5898d_c9c98fc1-8b2b-42ba-a708-893695e385df.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a luxury trip deal anywhere to go in spring\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Luxury Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.104, 0.89, 0.12] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6859", "image": {"bytes": "", "path": "./images/b30b9f84-0541-4826-a3af-98220b851f7c_712639b1-118b-4f73-b96f-cd5b48c06cb3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's sports wear, size S.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: sports wear\n[svg] -> CLICK\n[heading] Gender > Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.142, 0.471, 0.167] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6860", "image": {"bytes": "", "path": "./images/4f208b8b-6d5c-4d3f-8df0-cab24d29edda_50018063-9417-46ac-a1ed-269e8302453d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Last of Us series and add it to my watch list.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: The Last of Us\n[link] The Last of Us The Last of Us 2023 Pedro Pascal, B... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.228, 0.942, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6861", "image": {"bytes": "", "path": "./images/be5bae8d-bebc-4474-9d28-1cc633be8d8d_98a9cc6a-d5a0-4500-938f-546404bb57f1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest hotel in New Delhi for 3 days starting from April 24 and add 3 to list.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.461, 0.01, 0.509, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6862", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_eb8c0d47-b9b8-4622-a93d-57b975949833.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[menuitem] Research -> HOVER\n[menuitem] Price Trends -> CLICK\n[span] -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.113, 0.16, 0.126] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6863", "image": {"bytes": "", "path": "./images/2281faf9-ff02-42e3-b785-8bcc3a0e530a_cdff5762-b4b7-4a22-955e-f8148168d909.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look up information on the potential side effects of rogaine.\nPrevious actions:\n[combobox] Search products and services -> TYPE: rogaine"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.191, 0.286, 0.33, 0.432] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6864", "image": {"bytes": "", "path": "./images/b7003092-3e6e-4fac-9867-083cac064f89_075e3102-01a5-4d21-a14a-22ffb129f1b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find JetBlue career openings in New York\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.134, 0.56, 0.183, 0.589] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6865", "image": {"bytes": "", "path": "./images/c497534c-76a5-4ffb-af9a-10ee7afcc784_2ba8d331-eb38-47cf-a09d-60885c37a401.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a car to book with miles to pick up on Caldwell (Ohio) on april 8 at 8 am and return on april 13 on noon at the same location\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.611, 0.294, 0.624] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6866", "image": {"bytes": "", "path": "./images/4e3cc9e2-baf7-449f-bf61-919eb773f29c_a43bbcfe-96dc-4222-ac22-4f2afc78bc28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Travel Pack for hiking.\nPrevious actions:\n[button] Travel -> CLICK\n[link] Travel Backpacks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.051, 0.45, 0.11, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6867", "image": {"bytes": "", "path": "./images/21e5c264-df85-4055-a566-ecb65cdd8c63_f60f9f16-8348-47e1-b2ad-67b88dd5fac7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add to my basket tickets for 3 people from New York to Alfred.\nPrevious actions:\n[span] New York, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Alfred\n[span] Alfred, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[gridcell] March 31, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.273, 0.197, 0.312, 0.217] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6868", "image": {"bytes": "", "path": "./images/42657330-bfc5-425d-ae21-396a9ba1fb12_4fdd3302-d141-439e-a0a2-9a01d9249890.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find yellow t-shirts for women small size that are less than $20.\nPrevious actions:\n[link] WOMEN -> CLICK\n[RootWebArea] Women's, Men's & Kids' Clothing & Accessories | UN... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.094, 0.623, 0.114] in the image\nAnd my action is Action: TYPE\nValue: women t-shirts"}]}, {"id": "mind2web_6869", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_45d41999-3dfb-4c9d-ba3b-cac736ee5256.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.425, 0.255, 0.568, 0.291] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6870", "image": {"bytes": "", "path": "./images/b83120d9-5892-4e8f-b4b9-346cc07827a7_0262cd12-5355-4602-be83-a0e4fc8e5196.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a playlist and name it \"Dirty\" and add 5 Doja Cat songs.\nPrevious actions:\n[button] Search -> CLICK\n[button] Add -> CLICK\n[button] ADD TRACK -> CLICK\n[textbox] Search -> TYPE: Doja Cat\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.662, 0.373, 0.688, 0.386] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6871", "image": {"bytes": "", "path": "./images/8d7a29cf-7e66-4660-9b26-4eeba7c3f158_34f7ec94-d726-48df-b6da-a798f9bc8325.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of children's program events in Illinois.\nPrevious actions:\n[link] Events -> CLICK\n[button] All -> CLICK\n[input] -> CLICK\n[button] All -> CLICK\n[label] Children\u2019s Program -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.234, 0.161, 0.267] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6872", "image": {"bytes": "", "path": "./images/c759aa6c-0738-4ac6-b03c-41d0f91ff4d3_a007f0aa-95eb-4604-96a1-fe8c2dfbd07c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a SUV rental from Mar 31 (noon) to April 7, 2023 (noon) from Chicago O'hare Intl Airport, ORD and returning to same place with the lowest pay now price.\nPrevious actions:\n[link] 7 -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK\n[button] Select My Car -> CLICK\n[generic] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.417, 0.023, 0.535, 0.034] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6873", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_c3bca17f-7481-4506-a7fd-bded60c14834.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: belo horizonte"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.175, 0.104, 0.438, 0.121] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6874", "image": {"bytes": "", "path": "./images/1ced6d51-577b-4e85-b96e-b823dbbcbf29_c7f7cff6-1d18-48c2-8a61-dc14b5b44b96.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed pre-owned man Nike shoe in size 9 with free local pickup under 75 dollars.\nPrevious actions:\n[link] Fashion -> HOVER\n[link] Men's Shoes -> CLICK\n[link] Nike -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.368, 0.392, 0.465, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6875", "image": {"bytes": "", "path": "./images/29f639c1-20da-46a6-b9c5-bbec77ac3c03_93aa5675-083a-42ed-9c3f-a25176a028ec.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open community discussions for Dota 2 game and open latest topic.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.237, 0.194, 0.325, 0.209] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6876", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_08c9c18f-c5f0-460c-ba3d-a1e51201ddf5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\n[link] Jets signing former Packers QB Boyle to 1-year dea... -> CLICK\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.268, 0.768, 0.286] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6877", "image": {"bytes": "", "path": "./images/11344944-81ef-4fde-82b7-7e9724fc96f1_9078fd3a-f526-43ca-8756-187c5f59b43f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the list of discussion forums for games I own.\nPrevious actions:\n[link] COMMUNITY -> HOVER\n[link] DISCUSSIONS -> CLICK\n[link] Game Forums -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.189, 0.443, 0.204] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6878", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_4fc26c69-ac92-4f10-b4ac-36bdbe42d9af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.149, 0.487, 0.183] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6879", "image": {"bytes": "", "path": "./images/a6372f23-f462-4706-8455-5b350c46d83c_f932237e-6a41-40a9-8df8-38ab876cc6b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a winery tour in Napa Valley in a winery which serves Mediterranean cuisine with wine testing for 4 guests on April 15, 10 am in a outdoor setup.\nPrevious actions:\n[span] Napa Valley -> CLICK\n[combobox] Reservation type -> SELECT: Wineries\n[svg] -> CLICK\n[svg] -> CLICK\n[button] 15 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.275, 0.724, 0.334] in the image\nAnd my action is Action: SELECT\nValue: 10 00 AM"}]}, {"id": "mind2web_6880", "image": {"bytes": "", "path": "./images/b467797c-5fab-4ee6-9b1f-ac6dc50c05cf_fec13c43-55d5-4c4d-9059-7137018069eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced single pack of Xerox genuine magenta toner sold by Newegg with free shipping.\nPrevious actions:\n[link] \ue660 Computer Peripherals \uf105 -> CLICK\n[link] Printer Ink & Toner \uf105 -> CLICK\n[link] Xerox Toner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 0.428, 0.158, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6881", "image": {"bytes": "", "path": "./images/c44cfc8c-abea-4eef-838a-ba3a2716f8fa_c19e76a4-4664-435b-ba46-9aa4971e02db.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: calculate a car loan payment for a $25000 car with a $5000 down payment and 12 percent interest rate for 48 months.\nPrevious actions:\n[menuitem] Finance -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.376, 0.044, 0.468, 0.067] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6882", "image": {"bytes": "", "path": "./images/a3bc6528-5fd5-45ac-81aa-7fafde757022_cd20ad0a-0250-46b7-93aa-2bcd1837d9f8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse new laptops from $400 to $500 that offers free shippping.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.025, 0.652, 0.044] in the image\nAnd my action is Action: TYPE\nValue: laptop"}]}, {"id": "mind2web_6883", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_bfc2aafb-1493-4af0-8bd9-8680ffbec320.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[tab] Flights -> CLICK\n[button] Leaving from -> TYPE: Los Angeles, CA\n[button] Los Angeles (LAX - Los Angeles Intl.) California, ... -> CLICK\n[button] Going to -> TYPE: Miami, FL\n[textbox] Going to -> TYPE: Miami, FL"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.261, 0.652, 0.304] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6884", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_b7ea921e-9106-4ffa-8427-c196f77649fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: London"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.093, 0.132, 0.385, 0.144] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6885", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_9a38f090-86ba-4472-b089-7737200bcfaf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[div] Add -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.23, 0.606, 0.273] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6886", "image": {"bytes": "", "path": "./images/58394242-6531-4791-a7fc-6f279037706c_4749e515-087a-47b2-a652-3a8342d174a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Favorite a shared room in Belo Horizonte costing up to $100 to stay in a weekend in May\nPrevious actions:\n[label] Weekend -> CLICK\n[div] May -> CLICK\n[span] -> CLICK\n[span] Filters -> CLICK\n[textbox] max price $ -> TYPE: 100"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.139, 0.495, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6887", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_cdfc251d-d069-4874-9855-405b68bd27ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14\n[select] All Movies -> SELECT: 65\n[select] Sun, Mar 26 -> SELECT: Tue, Mar 28\n[link] 7:30pm -> CLICK\n[text] J10 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.54, 0.82, 0.591, 0.889] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6888", "image": {"bytes": "", "path": "./images/9a5aa299-e77d-44ca-ab0f-f6fd83e3fde1_4fdb839d-8ee9-406c-bd79-3ae4d764b752.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest and highest-rated hotel in the Navi Mumbai area of Mumbai, India, on April 12 for just one day.\nPrevious actions:\n[span] Mumbai -> CLICK\n[span] 12 -> CLICK\n[i] -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.456, 0.198, 0.574, 0.226] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6889", "image": {"bytes": "", "path": "./images/76514d5c-cc1c-40b0-8c08-427cedb106e4_08747f7d-7119-4877-821b-f4fc61f180d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for queen-size pillow protectors from the Marriot shop, and if found, add two pieces to the cart and checkout.\nPrevious actions:\n[button] Special Offers -> CLICK\n[link] Shop Marriott Opens a new window -> CLICK\n[menuitem] category pillows -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.667, 0.251, 1.0, 0.442] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6890", "image": {"bytes": "", "path": "./images/44ad28c9-c0d6-4d41-a49d-afeaa6ecaabe_ed7860e2-f5ac-45ce-9b5f-6eedf85b7cd1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way bus ticket between Albany, NY and Bloomington, NY April 10th and add the second one to cart.\nPrevious actions:\n[div] Albany, NY -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Bloomington, NY\n[span] Bloomington, NY -> CLICK\n[textbox] Pick a date -> CLICK\n[button] Next month -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.716, 0.069, 0.769] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6891", "image": {"bytes": "", "path": "./images/f122e4c9-7634-4193-94f9-8623cd75d1f1_78a07e5a-688d-4a24-9bde-901d62e64a5c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets from Manchester Piccadilly to any station in London on April 8, leaving nearest 10 AM for 1 adult\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.076, 0.327, 0.101] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6892", "image": {"bytes": "", "path": "./images/d7c3103a-c195-4503-ab20-ecae4d4ce419_bf283bf2-f76d-42be-b04d-3dcf4f25f1ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the motorcycle toll fee for the Queens midtown tunnel.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.0, 0.597, 0.035] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6893", "image": {"bytes": "", "path": "./images/58badcfc-343a-47c1-8aec-f609925eb4ed_d2244643-d85f-47f6-a9c3-8db219104141.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a Ricky Kej track to listen and share which has been added in the last year and is between 2 to 10 minutes.\nPrevious actions:\n[searchbox] Search -> TYPE: Ricky Kej\n[link] Search for \u201cRicky Kej\u201d -> CLICK\n[link] Tracks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.039, 0.261, 0.212, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6894", "image": {"bytes": "", "path": "./images/8c5ccffa-b88e-4928-9394-6d46cfd36e59_2d6cc8ed-6f0c-4311-a5aa-860a52c8452e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are today's Limited Time Offers deals?\nPrevious actions:\n[link] Coupons & Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.465, 0.096, 0.476] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6895", "image": {"bytes": "", "path": "./images/5b56d5b8-1f41-43ca-9f21-369d849f1aa0_00aa52ec-0e86-450f-b72e-2dc795817cac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the season pass prices for Hurricane HarborLos Angeles.\nPrevious actions:\n[span] Hurricane Harbor Los Angeles -> CLICK\n[button] Go! -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.313, 0.047, 0.434, 0.056] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6896", "image": {"bytes": "", "path": "./images/5a181549-c79c-499c-b7d7-90860f0e0068_a2f3a3c8-b17a-48c3-9762-f1311a93667c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play Trailer of \"The Flash\" 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.173, 0.156, 0.185] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6897", "image": {"bytes": "", "path": "./images/ea8737b0-0ff4-4476-b97d-5677d4f526dc_ef71de36-8d22-4f74-a4db-9ef1d45fd9fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-rated locksmith in San Francisco for a key extraction from a vehicle.\nPrevious actions:\n[link] Home Services -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.366, 0.145, 0.474, 0.176] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6898", "image": {"bytes": "", "path": "./images/fb04bb83-9ad5-4186-a0da-17af9ba2a63d_1bdbfb19-e149-4494-8c81-823066198ce4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find offers for VIFP Club loyalty number 9016467685 and show the ones that are for members only.\nPrevious actions:\n[button] Manage -> HOVER\n[use] -> CLICK\n[link] Visit our cruise deals page to view your offers -> CLICK\n[button] Show My Deals -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.128, 0.445, 0.323, 0.455] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6899", "image": {"bytes": "", "path": "./images/521d9006-4560-49af-b232-c713d87dd2e2_0d5b7f9f-9236-43f6-a551-ef4633323303.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a group ticket for 20 people in Six Flags Magic Mountain in Los Angeles on Apr 30, add 5 mega meal deals with it. Name the group Crew and check out as James Johnson, james.john@gmail.com. It is a family trip organized by Johnson.\nPrevious actions:\n[i] -> CLICK\n[button] Buy Tickets -> CLICK\n[button] April 30, 2023 -> CLICK\n[button] Buy Now -> CLICK\n[generic] Increase -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.75, 0.047, 0.777, 0.063] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6900", "image": {"bytes": "", "path": "./images/e8ce2bcd-894d-4b2f-99e4-ae695a6b9dd9_a5d935f5-61f6-4797-9dc6-33eb9a260ece.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest cars available at San Francisco Airport for a day.\nPrevious actions:\n[link] San Francisco Airport -> CLICK\n[div] Fri, Mar 31 -> CLICK\n[checkbox] 29 March 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.097, 0.923, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6901", "image": {"bytes": "", "path": "./images/74b456b6-0e62-429f-b13c-45861f2cdf82_f3f39cfc-eb80-4f5e-ab84-0bce4f894d21.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a recent full-time job in United Airlines corporate in Gurugram, India if a job in sales is available, then apply through email at jacksparrow@gmail.com.\nPrevious actions:\n[button] State / Province -> CLICK\n[span] -> CLICK\n[button] City -> CLICK\n[span] -> CLICK\n[button] Hiring Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.038, 0.323, 0.218, 0.334] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6902", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_5630e994-101d-43c2-8c69-da80024e3159.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK\n[button] All -> CLICK\n[button] See all -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.262, 0.688, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6903", "image": {"bytes": "", "path": "./images/eee72e78-71bd-434b-ba74-33888ea5522d_238e3167-126e-4c08-8de2-c51cb969c94b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book the cheapest hotel in Le maraise neighborhood in paris with 2 room for 3 adult on March 27th to April 2nd.\nPrevious actions:\n[link] Search for hotels -> CLICK\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Le maraise\n[span] Le Marais, Paris, France -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.375, 0.246, 0.404] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6904", "image": {"bytes": "", "path": "./images/4008118a-68a1-4a9f-968c-8b92a979dff7_62b5851a-66ac-4f7b-ab41-5c8ca47f87b0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the breakfast options on the Carnival Breeze ship.\nPrevious actions:\n[button] Explore -> HOVER\n[link] Dining -> CLICK\n[span] EXPLORE DINING -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.035, 0.27, 0.055, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6905", "image": {"bytes": "", "path": "./images/486bdb13-16c5-4a53-8566-a60caaf94a73_bf3a4668-958b-4149-a5b0-3870bf764b06.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a GoBites Uno Spork in the color magenta and have it delivered to a store in Seattle, WA. 
Checkout with default address, and use credit number 123456789, CVV 123, expire 01, 2024\nPrevious actions:\n[button] Add to cart -> CLICK\n[link] Cart & Checkout -> CLICK\n[link] Proceed to checkout -> CLICK\n[button] Continue with this address -> CLICK\n[button] Continue without membership -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.119, 0.789, 0.2, 0.817] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6906", "image": {"bytes": "", "path": "./images/61563837-99af-45e2-a13f-9d6bd9f74a81_90dd2ba7-06e3-4e8d-b4a7-cd9f16ee5fd2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show all the external solid state drives available with lowest price first.\nPrevious actions:\n[link] See All -> CLICK\n[link] Computers -> CLICK\n[link] Drives & Storage -> CLICK\n[link] External Solid State Drives -> CLICK\n[span] Sort by: -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.847, 0.118, 0.979, 0.134] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6907", "image": {"bytes": "", "path": "./images/3e0d115f-9da7-4211-9d99-122912890b40_865d6d65-322d-4261-8173-2a3f843e747d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Delete the history of what i heard recently.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.232, 0.0, 0.313, 0.021] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6908", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_53510142-0f5f-4f73-86f1-61dc206fb9a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[generic] Run Search -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.077, 0.338, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6909", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_92de7066-88c4-424e-97df-4a48ba0b59b5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.18, 0.259, 0.204] in the image\nAnd my action is Action: TYPE\nValue: SPRINGFIELD"}]}, {"id": "mind2web_6910", "image": {"bytes": "", "path": "./images/1a038b20-cd5a-4c32-8b11-fe2aeeb4ec85_e977dfbb-fe57-43f2-979f-b9249b2747b2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View Fares from Grand Central to Stoney Brook for Port Jefferson after 10:00pm for adult.\nPrevious actions:\n[link] Fares & Tolls -> CLICK\n[link] See railroad fare details. -> CLICK\n[link] Plan a trip to see fares. 
-> CLICK\n[searchbox] From -> TYPE: grand central\n[span] Grand Central -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.215, 0.474, 0.252] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6911", "image": {"bytes": "", "path": "./images/e9c7496f-bdea-4651-aeae-0f4d12d59d12_85fb9ffc-48b1-4e4b-b07f-f81e89cebb4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter car accident lawyers in Stanford, CA, sort them by distane and find the phone number of the nearest one.\nPrevious actions:\n[input] -> TYPE: Stanford, CA\n[input] -> TYPE: car accident lawyers\n[link] car accident lawyers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.59, 0.112, 0.683, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6912", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_e1297f39-b8fc-447a-a101-078ec44c68f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York\n[li] New York, NY, USA -> CLICK\n[textbox] Monthly Start Date -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.325, 0.195, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6913", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_807e15c8-e808-4db8-abf7-e3e606df063c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\n[link] Demo Drive -> CLICK\n[button] Model Y -> CLICK\n[textbox] Last Name -> TYPE: Adams\n[textbox] First Name -> TYPE: Roy"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.481, 0.416, 0.659, 0.436] in the image\nAnd my action is Action: TYPE\nValue: 123-999-0000"}]}, {"id": "mind2web_6914", "image": {"bytes": "", "path": "./images/8e849b85-5acc-4d26-ad5e-d24ad24343df_db0dd015-f9d9-4fd6-9c1b-90ee29a84a8e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book parking spot for James SMith at Stewart Hotel in New York for 2 hours. The street address is 133 st avenue in New York. Zip code is 10001 and phone number is 888888888. The email address is buckeye.foobar@gmail.com. Employer's name is Gua AB.\nPrevious actions:\n[link] BOOK MONTHLY PARKING -> CLICK\n[textbox] First Name -> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: SMith\n[textbox] Address1 -> TYPE: 133 st avenue"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.345, 0.24, 0.38] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_6915", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_226e8bc5-1ca6-42ef-867a-370029f7942b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[div] Recommended -> CLICK\n[option] Price (Low to High) -> CLICK\n[heading] La Quinta Inn & Suites by Wyndham Houston Hobby Ai... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.26, 0.953, 0.292] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6916", "image": {"bytes": "", "path": "./images/08f78082-3408-4714-99ea-5a699840317d_5660db7c-d327-43d3-ba70-3c9541460e84.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a store in Magnolia Texas.\nPrevious actions:\n[link] Find Stores -> CLICK\n[link] View store directory -> CLICK\n[link] Texas -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.506, 0.168, 0.559, 0.179] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6917", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_46fee503-6ddf-4dcb-914b-7b66ac2afbaa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Request a refund -> CLICK\n[button] Add trip -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.497, 0.333, 0.742, 0.355] in the image\nAnd my action is Action: TYPE\nValue: Lo"}]}, {"id": "mind2web_6918", "image": {"bytes": "", "path": "./images/8634bbc0-7bcd-4c88-ad0f-5c73c3f69e53_78e4da0e-d2e1-458b-a8ec-ee5acaa8d971.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Join the waitlist for a ticket for the Hackney Clothes Swap - Earth Day event being held on April 22nd. Name: Joe Bloggs. Email: joe@bloggs.com\nPrevious actions:\n[div] Hackney -> CLICK\n[combobox] autocomplete -> CLICK\n[combobox] autocomplete -> TYPE: Hackney\n[div] London, UK -> CLICK\n[textbox] Search events -> TYPE: Hackney Clothes Swap - Earth Day"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.626, 0.537, 0.766, 0.607] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6919", "image": {"bytes": "", "path": "./images/3358dffd-3673-435b-bfcb-cb242a2c622c_67f3e885-cc8d-4607-bde7-b6dd64775a4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse round-trip bus tickets from Houston to Dallas leaving on May 7th and returning a week later.\nPrevious actions:\n[textbox] From (type text or use the arrow keys) -> TYPE: Houston\n[span] Houston, TX -> CLICK\n[textbox] To (type text or use the arrow keys) -> TYPE: Dallas\n[div] Dallas, TX -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.196, 0.77, 0.227, 0.794] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6920", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_b946c050-003b-4cd1-a7cc-f23d7e291ba1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[link] Trade In -> CLICK\n[link] Trade in by mail -> CLICK\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.744, 0.246, 0.767, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6921", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_bc272ed7-686b-48e6-be10-19d50e5ff9d3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[textbox] Start Date -> CLICK\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.635, 0.455, 0.648, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6922", "image": {"bytes": "", "path": "./images/f2f531ff-94d5-4320-8594-7ae5d3a52af5_34e7f545-430f-4557-84b4-ec4cfea0876d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show top 50 Movies and TV shows by Sci fi genre.\nPrevious actions:\n[path] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.079, 1.0, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6923", "image": {"bytes": "", "path": "./images/862faed7-449c-4624-902b-6b512a977d0c_2be16347-d879-4bba-abe1-8c4028244c19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new surge protector with 6 to 8 outlets under 25 dollars with customer reviews above 4+, add 2 pieces to the cart.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.008, 0.651, 0.037] in the image\nAnd my action is Action: TYPE\nValue: surge protector"}]}, {"id": "mind2web_6924", "image": {"bytes": "", "path": "./images/f738b393-9234-4c07-8baf-20f05627d5ed_8c2bdab0-6b93-4326-83f2-c3bd35cff1de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the highest-reviewed landscaper in West Hollywood for the design of lawn, who gives fast responses, and make sure he has a license verified by Yelp.\nPrevious actions:\n[span] Landscaping -> CLICK\n[textbox] Near -> TYPE: WEST HOLLYWOOD\n[span] West Hollywood -> CLICK\n[radio] Lawns or greenscapes -> CLICK\n[button] Fast-responding -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.553, 0.092, 0.65, 0.103] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6925", "image": {"bytes": "", "path": "./images/0a2130e7-1108-4281-8772-25c8671fb88e_eb82b9a7-62d4-4ad0-93aa-7ddc9c93cb65.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a non-fiction children's book for ages 9-11 on the topic of history in the Hindi language, and add the top book to the cart, if not available choose notify me option and also add the book to new wishlist must buy.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Children's Books -> CLICK\n[link] Non-Fiction -> CLICK\n[link] History -> CLICK\n[select] All -> SELECT: Ages 9-11 (13,217)"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.407, 0.196, 0.426] in the image\nAnd my action is Action: SELECT\nValue: Hindi (59)"}]}, {"id": "mind2web_6926", "image": {"bytes": "", "path": "./images/cdd9ef05-4d5b-4469-b001-8d73b6dc8120_aee3baf0-fbec-4ac0-8ff2-60d01149dd39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the documents required to sell a car in New Jersey for which payments are being paid and it has single person on the title who can come to the store\nPrevious actions:\n[link] Sell/Trade -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.567, 0.22, 0.823, 0.265] in the image\nAnd my action is Action: SELECT\nValue: New Jersey"}]}, {"id": "mind2web_6927", "image": {"bytes": "", "path": "./images/80e12375-19ad-400f-9e35-2a3853173bed_8efdb3e2-d599-4e4c-91de-518fbcfe3e4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for the same-day delivery of thin crust frozen pepperoni pizza and add 2 packets to the cart.\nPrevious actions:\n[link] Categories -> CLICK\n[link] Grocery -> CLICK\n[span] Frozen Foods -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.285, 0.316, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6928", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_1643a5ef-db8a-42fb-a052-37a0e9122ac1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[img] A person holding a tomato and a grocery bag with a... 
-> CLICK\n[button] Next -> CLICK\n[link] Personal Care -> CLICK\n[svg] -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.285, 0.83, 0.302] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6929", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_517a437f-10b1-4713-b44e-9d72da782cb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[textbox] Enter your Origin -> TYPE: Cheyenne\n[textbox] Enter your Destination -> TYPE: Helena\n[button] Get Directions -> CLICK\n[li] Cheyenne, WY, USA -> CLICK\n[li] Helena, Montana -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.498, 0.549, 0.573, 0.572] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6930", "image": {"bytes": "", "path": "./images/ee22220c-802b-431f-abb4-0131fd8dbe5f_98caa132-9fac-4589-8b0d-4fcc6e8e0f75.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get 2 tickets for a free pet festival in English in Portland on april 15 to april 16\nPrevious actions:\n[label] -> CLICK\n[label] -> CLICK\n[label] -> CLICK\n[combobox] autocomplete -> TYPE: portland\n[div] Portland -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.286, 0.216, 0.298] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6931", "image": {"bytes": "", "path": "./images/b20e1dc4-651b-46e1-8470-16250657f2a8_75f3bf7b-84da-4fb6-9810-13ca7ce311fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest SUV in Brooklyn for 1 day.\nPrevious actions:\n[span] Different drop-off -> CLICK\n[tab] Same drop-off -> CLICK\n[button] End date -> CLICK\n[button] May 22, 2023 -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.404, 0.049, 0.442, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6932", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_b106cb92-845b-4cc4-b750-58e03d6ac5f6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.586, 0.017, 0.781, 0.052] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6933", "image": {"bytes": "", "path": "./images/4777d638-204d-4e44-b81c-2fb43c471fb2_1a28a0b0-4e06-4b68-9287-28d439b713ed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most reviewed cocktail bar with outdoor seating for reservation in San Francisco.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.104, 0.036, 0.201, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6934", "image": {"bytes": "", "path": "./images/87ca0d6a-c513-4aae-b14f-7d4db3255e98_a35dc0e8-fa41-4eb8-a854-1f952e660828.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a flight from Dublin in Ireland to Paris Beauvais in france for 2 adults leaving apr 26 and returning may 2 with the promo code 10000001\nPrevious actions:\n[textbox] From -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.17, 0.195, 0.286, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6935", "image": {"bytes": "", "path": "./images/c1a354a1-7990-4ca8-9fc5-2fdb1b5df337_2763fe68-209b-4181-b309-e9e75ebaf703.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a book in Hindi with ISBN 1648926800.\nPrevious actions:\n[link] Advanced Search -> CLICK\n[input] -> CLICK\n[input] -> TYPE: 1648926800\n[select] All -> SELECT: Hindi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.859, 0.383, 0.969, 0.424] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6936", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_8b26cb30-f938-42d7-ad51-858d186a5422.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.453, 0.425, 0.512, 0.44] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6937", "image": {"bytes": "", "path": "./images/35cf31d3-c88d-45fe-9959-4cf1de72bc7d_f0e05a7d-56d2-4f09-b264-8e29f664c2ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse for trending women sports bras and add 3 trending black bras to Wishlist.\nPrevious actions:\n[menuitem] Women -> CLICK\n[link] Sports Bras -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Black -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.464, 0.157, 0.485, 0.17] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6938", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_5c48bea6-ba29-458e-8ccb-ab038bfe9f9c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. 
The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[link] Birthdays -> CLICK\n[button] Buy Now\ue903 -> CLICK\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.14, 0.487, 0.167] in the image\nAnd my action is Action: TYPE\nValue: 05/05/1995"}]}, {"id": "mind2web_6939", "image": {"bytes": "", "path": "./images/969f36c3-52e7-42da-80bd-11a2d04d53ae_a48a19f1-6b38-44bf-9f1c-923274418b08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Biography of the NFL player named Michael Jordan\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.046, 0.966, 0.078] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6940", "image": {"bytes": "", "path": "./images/942e315d-b494-469c-93df-cd69c88ea2d2_6c20de6d-fcb6-460e-a454-34f681cbb142.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find information regarding Fastbreak program.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.743, 0.018, 0.837, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6941", "image": {"bytes": "", "path": "./images/e3486ac5-6d1e-476f-b389-2e6df37f94bd_ed1bafa7-9add-4b87-989b-90aa882fac86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search a cheapest homestay with balcony in Bali, Indoneshia from April 1 to 4 for 2 adults and 4 children aged 3, 5, 8, and 12 years, respectively, double beds prefered, including breakfast.\nPrevious actions:\n[combobox] Age of Child 1(Child's age (years)) -> SELECT: 3\n[img] Add -> CLICK\n[combobox] Age of Child 2(Child's age (years)) -> SELECT: 5\n[img] Add -> CLICK\n[combobox] Age of Child 3(Child's age (years)) -> SELECT: 8"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.259, 0.393, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6942", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d4d09cdb-86cd-4870-9e02-5cc4fc3a08d2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[button] Deals -> HOVER\n[link] Explore Deals -> CLICK\n[link] Climbing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.247, 0.448, 0.428, 0.483] in the image\nAnd my action is Action: SELECT\nValue: Price Low - High"}]}, {"id": "mind2web_6943", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_587b5c69-60df-46d7-a9bb-da2624630a64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK\n[link] Car payment calculator -> CLICK\n[textbox] Vehicle Price -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.159, 0.176, 0.459, 0.197] in the image\nAnd my action is Action: TYPE\nValue: 10000"}]}, {"id": "mind2web_6944", "image": {"bytes": "", "path": "./images/0cb50efe-4568-4c8d-bf0e-ed106cf99d1d_5633f55b-e5ea-434b-9aae-06fec4fbe863.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the all time most popular solo track by ArianaGrande\nPrevious actions:\n[link] Search -> CLICK\n[textbox] Search -> TYPE: Ariana Grande\n[button] Search -> CLICK\n[a] -> CLICK\n[button] Sorted by: Last 7 days -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.568, 0.546, 0.656, 0.569] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6945", "image": {"bytes": "", "path": "./images/d1e46885-62b3-42ae-837a-474b1541348a_6a689c11-d9a8-4139-b828-7312938f530d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking passes for New York Yankees game on April 14\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.164, 0.158, 0.782, 0.188] in the image\nAnd my action is Action: TYPE\nValue: New york yankees"}]}, {"id": "mind2web_6946", "image": {"bytes": "", "path": "./images/71638c81-42f0-4218-a1b0-d3a137ad1cff_0ea5b7ba-2e88-4415-ac7e-eb3b6a7f71e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the price of Tesla Model Y Performance for the 10001 zip code.\nPrevious actions:\n[link] Model Y -> CLICK\n[link] Order Now -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.713, 0.454, 0.963, 0.484] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6947", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_cff604f9-1605-4b09-b220-446853102b4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[heading] Amenities \ue023 -> CLICK\n[label] Business center (11) -> CLICK\n[label] High-speed Internet (11) -> CLICK\n[button] APPLY -> CLICK\n[generic] Distance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.506, 0.37, 0.525] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6948", "image": {"bytes": "", "path": "./images/28f6ac29-940a-4ec6-a8bd-16ae2ce69a1c_e24f4566-ec7e-49e8-b98f-bfab996bad35.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search receipt with the eTicket 12345678 for the trip reserved by Jason Two\nPrevious actions:\n[tab] MY TRIPS -> CLICK\n[link] Find a receipt -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.067, 0.182, 0.082, 0.192] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6949", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_0f7759b9-f43a-4d97-ace7-6405722611eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[textbox] Enter a city, hotel, airport, address or landmark -> TYPE: Chennai\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.552, 0.22, 0.765, 0.246] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6950", "image": {"bytes": "", "path": "./images/acc13b3d-0585-4cbe-a2b1-30c6e25517fa_4fc9c4f6-0be4-45fe-b57b-1950681d0415.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for a Four Points by Sheraton gift card of value 150 dollars with Marriott deluxe box and add it to the cart in the name of Clara from James with a happy Christmas message and checkout.\nPrevious actions:\n[link] Gift Cards \ue922 -> CLICK\n[link] Browse Gift Cards -> CLICK\n[button] Our Brands -> CLICK\n[img] Travel Reinvented. 
-> CLICK\n[textbox] *Amount -> TYPE: 150"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.687, 0.247, 0.783, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6951", "image": {"bytes": "", "path": "./images/ab23e83a-e3bd-4648-9d68-989f5c158d8f_8abf4ce0-758b-4ae6-8a7d-5a906da17d25.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Request a refund for the traveler Ian Lo and, his document number is 12345678912345 and email ian.lo@gmail.com\nPrevious actions:\n[tab] MY TRIPS -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.056, 0.142, 0.148, 0.152] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6952", "image": {"bytes": "", "path": "./images/9653dc0d-96d9-414f-a711-3af94bd7cf5b_5302abb9-8cce-45a3-8c07-a1b13fc6f6a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: sell playstation controller for cash credit on GameStop in 43240.\nPrevious actions:\n[link] repeat Trade-In -> CLICK\n[img] -> CLICK\n[link] Sony DUALSHOCK 4 Wireless Controller for PlayStati... -> CLICK\n[div] Find a Store -> CLICK\n[link] FIND A STORE -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.168, 0.657, 0.194] in the image\nAnd my action is Action: TYPE\nValue: 43240"}]}, {"id": "mind2web_6953", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_50fd7aaa-959c-4d4c-b224-5dd9a2bd05fe.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[textbox] Flight origin input -> TYPE: Mumbai\n[span] Chhatrapati Shivaji Intl -> CLICK\n[textbox] Flight destination input -> TYPE: Dubai\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.106, 0.292, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6954", "image": {"bytes": "", "path": "./images/ec472065-2913-40df-bbbf-ee95bc76485a_d28a21e8-c00e-4910-b822-cc0f714abbc5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list named New that includes thr\nee albums, one each from the genres pop, rock and electronic.\nPrevious actions:\n[button] Explore -> CLICK\n[link] Explore All -> CLICK\n[link] 3,682,991 Pop -> CLICK\n[img] Phil Collins - No Jacket Required -> CLICK\n[button] Add to List -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.325, 0.346, 0.375, 0.359] in the image\nAnd my action is Action: SELECT\nValue: New"}]}, {"id": "mind2web_6955", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_abeec9fe-726d-4040-9765-cc8bb0a8b920.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.086, 0.229, 0.914, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6956", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_83a8bc23-465e-4b2f-a976-ae902a22fc9a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\n[link] FAQs -> CLICK\n[link] \uf2b1Cancellation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.157, 0.879, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6957", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_d0ce30cd-701a-4a18-88a0-296d0f6c054c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.84, 0.022, 0.93, 0.033] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6958", "image": {"bytes": "", "path": "./images/10b2af14-f708-4abe-94fc-00163d11cb56_5a47ed74-98e7-45da-a78e-7084c186f24c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Create a new list and add four items from the personal care category at Walgreens.\nPrevious actions:\n[button] Done -> CLICK\n[button] Back -> CLICK\n[path] -> CLICK\n[link] Shower Essentials -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.751, 0.372, 0.83, 0.397] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6959", "image": {"bytes": "", "path": "./images/d3a4e6c3-65e8-46f7-8dd7-439bc559af2a_c42f6c09-f6c3-462b-959f-2973c7f727bb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find value of 2016 Toyota Camry XLE Sedan 4D in black color with number plate AZXA46.\nPrevious actions:\n[button] Next -> CLICK\n[div] Select Your Options -> CLICK\n[generic] Black image Black -> CLICK\n[button] Next -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.548, 0.38, 0.634, 0.409] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6960", "image": {"bytes": "", "path": "./images/02142919-1e63-4059-9471-419158e159a7_32e7b754-8ce2-4176-a691-0dce0ebe24af.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check prices for luxury sedan car in Houston with insurance.\nPrevious actions:\n[textbox] Pick-up location -> TYPE: Houston\n[button] Place Houston, TX -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.853, 0.113, 0.923, 0.148] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6961", "image": {"bytes": "", "path": "./images/6d5c4d19-c2cc-4e68-9f51-71ba6cd6e38a_dc9fbc47-c21f-4f1d-bd12-6bd2c9d95272.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a hotel with 4 star and above rating in Los Angeles for 3 days next week and save it.\nPrevious actions:\n[link] Search for hotels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.241, 0.087, 0.478, 0.109] in the image\nAnd my action is Action: TYPE\nValue: Los Angeles"}]}, {"id": "mind2web_6962", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_73330912-2776-4a17-99b0-8b5976828695.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.317, 0.139, 0.376, 0.173] in the image\nAnd my action is Action: TYPE\nValue: madurai"}]}, {"id": "mind2web_6963", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_26c209a2-46d8-42f1-bac1-7f3ed1d525bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK\n[searchbox] Search -> TYPE: Uncharted Legacy of Thieves Collection\n[div] Uncharted: Legacy of Thieves Collection -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.43, 0.301, 0.449] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6964", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_571bfafa-ad8c-454a-bcd7-5d507abb8478.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001\n.\nPrevious actions:\n[div] Choose date -> CLICK\n[div] Jun -> CLICK\n[generic] 1 -> CLICK\n[generic] 4 -> CLICK\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.509, 0.383, 0.702, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6965", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_6c95fe7c-6317-48bf-a43b-7f3032763ef7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.52, 0.091, 0.757, 0.124] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6966", "image": {"bytes": "", "path": "./images/7cd5a347-0e44-4ea2-8fcf-45fec1844279_ff9510d8-86fe-40ad-b787-0a90b1d78a19.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music event organizers and follow the second one.\nPrevious actions:\n[link] Music -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.231, 0.235, 0.316, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6967", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_1222eefa-0175-4eef-a66f-e6bd0d109c4a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK\n[input] -> TYPE: Joe\n[input] -> TYPE: Bloggs"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.318, 0.347, 0.372] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_6968", "image": {"bytes": "", "path": "./images/8e133f6c-155d-4ba4-89f5-16f569d6e918_b8094fea-6545-48cd-b82a-d9420fd540c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest food tour in Paris which offers free cancellation\nPrevious actions:\n[textbox] Where to? -> TYPE: Paris"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.185, 0.729, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6969", "image": {"bytes": "", "path": "./images/c95ac388-3fea-457d-bb5d-fede3785c6f6_90c6af14-70ab-4b7d-962e-c01741f97a29.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me hotels in New York city to stay in during spring\nPrevious actions:\n[textbox] Where? -> TYPE: New York City"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.152, 0.89, 0.171] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6970", "image": {"bytes": "", "path": "./images/1cc13b4e-d3d3-480f-a2fe-f18e83159d93_6c7ee22f-e6b6-4cdb-a287-5162da143ba5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: with a $200 budget check available in central parkto check in on 24th to 27th march for 2 adult and a toddler less than a year old.\nPrevious actions:\n[link] hotels. 
-> CLICK\n[textbox] Check In \uf073 -> CLICK\n[link] 24 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.171, 0.389, 0.2] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6971", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_c06ba573-4b50-4b1e-9a87-70d18fa8474a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA HOLIDAY CAMPGROUNDS \uf0da -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.234, 0.508, 0.282, 0.522] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6972", "image": {"bytes": "", "path": "./images/360eeaa8-0077-42f4-8200-1e3cf6414cda_a7be10f1-c85f-444d-93a6-48f078088d83.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Send a message to Joe Bloggs who has the username 'boredcelt'.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.393, 0.007, 0.475, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6973", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_8c0f2845-0345-4194-a6bd-c1143e3da795.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[option] 6 -> CLICK\n[button] Update -> CLICK\n[button] Search -> CLICK\n[button] Search without signing in -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.764, 0.278, 0.779] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6974", "image": {"bytes": "", "path": "./images/9e035a36-1c77-4014-98ec-4d48ee41d904_6a78e4e9-77a2-4025-8623-e0d7eda8379b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Compare the fare types to book a 1-adult ticket from Springfiels, IL to Austin, TX for April 29th 2023\nPrevious actions:\n[button] Austin, TX, US (AUS) -> CLICK\n[span] -> CLICK\n[button] Find flights -> CLICK\n[textbox] Date -> CLICK\n[button] Move backward to switch to the previous month. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.597, 0.227, 0.627, 0.249] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6975", "image": {"bytes": "", "path": "./images/d86d5cc1-50f7-4bfd-8e5b-60db84e87956_fca33043-62dc-44e2-b64d-f14bb211f687.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the trade-in value for Call of Duty: Black Ops III for Xbox One.\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: trade in\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.441, 0.258, 0.618, 0.373] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6976", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_e1f976e1-1ac3-447e-921f-672cd8545c6c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.199, 0.152, 0.46, 0.17] in the image\nAnd my action is Action: TYPE\nValue: Boston Logan Airport"}]}, {"id": "mind2web_6977", "image": {"bytes": "", "path": "./images/e104a4ef-521f-4ca0-8e1a-098656207de2_62309d1f-10e6-4601-9cbb-6b407fe0a0a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find estimated monthly payments for a $10,000 vehicle with $500 down payment, 72 month term length and challenged credit score in the state of New Jersey\nPrevious actions:\n[link] Finance -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.383, 0.07, 0.617, 0.094] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6978", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_b3cf21f4-85bf-4461-8154-b500af3a6b9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.465, 0.407, 0.492, 0.43] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6979", "image": {"bytes": "", "path": "./images/a1d1f6c0-1ae9-47f0-b054-8f3f9935b7da_2a83533c-7eff-4390-817b-d6032b626a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule for the San Francisco 49ers.\nPrevious actions:\n[link] Schedule -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.058, 0.387, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6980", "image": {"bytes": "", "path": "./images/d56df06a-8234-4f31-8737-e74fe9d5fa04_857ae6d3-3942-4710-a68b-2ecaf84fda28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest return tickets from Leeds to Sheffield leaving at 8:30 am on Mar 25, 2023 and returning at 23:00 pm on same day for 1 adult and 2 child.\nPrevious actions:\n[svg] -> CLICK\n[listbox] select children -> SELECT: 2\n[listbox] select child age -> SELECT: 5-15\n[button] Done -> CLICK\n[button] Get cheapest tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.583, 0.709, 0.621] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6981", "image": {"bytes": "", "path": "./images/c2a17420-ceb4-4324-ae8f-60cf845a144b_6f4fd24e-96c4-4cba-9914-2abd10715701.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open Canyon de Chelly schedule calendar.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Canyon de Chelly"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.147, 0.784, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6982", "image": {"bytes": "", "path": "./images/e91126e0-023d-4f96-9378-74efb5d6ecc3_57555471-f75f-42f1-a810-cf336ce2258b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews and research information about Audi A6 2020.\nPrevious actions:\n[menuitem] Research -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.151, 0.183, 0.173] in the image\nAnd my action is Action: SELECT\nValue: Audi"}]}, {"id": "mind2web_6983", "image": {"bytes": "", "path": "./images/a3fc5023-1198-4c17-8c1f-45780ab7f3bd_4ab4e823-aad2-4316-90bd-3e6b9c41cf08.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the latest deluxe digital copy of the Xbox Series X Resident Evil game for pre-order and add it to the cart.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Video Games -> CLICK\n[generic] Refine by Category: Xbox Series X|S -> CLICK\n[link] Shop Pre-Orders -> CLICK\n[button] Franchise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.453, 0.366, 0.467] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6984", "image": {"bytes": "", "path": "./images/1bd729e8-abb2-4649-bb81-8c790c04f8ad_3fb0efb0-a518-4c6b-b5e6-709f2274140e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a multi-purpose painting tool available for pick up at zip 44240, if the brand Red Devil is available, add the cheapest one to the wishlist and view my complete wish list.\nPrevious actions:\n[button] Departments -> CLICK\n[button] Paint -> CLICK\n[link] Painting Tools -> CLICK\n[button] 43229 -> TYPE: 44240"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.31, 0.053, 0.417, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6985", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_20a29fa4-5700-4dae-a6c7-46b5d878e615.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.326, 0.284, 0.359] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6986", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_549866d9-de41-45c4-934c-6f26d3529dd7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Okay, got it. -> CLICK\n[img] undefined -> CLICK\n[button] Continue without a seat -> CLICK\n[div] 1 Small Bag only -> CLICK\n[label] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.79, 0.953, 0.825] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6987", "image": {"bytes": "", "path": "./images/55518089-52b9-4504-8e4f-885a9b2943a8_f1ee27e8-e8cb-43fd-882f-97d3c7dbdfb8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me a list of comedy movies, sorted by user ratings.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.1, 0.111, 0.364, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6988", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_02471179-19b4-45e2-9121-a5e8a2a39f26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK\n[div] Size -> CLICK\n[link] XL -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.277, 0.189, 0.495, 0.356] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6989", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_fdf8015b-4c08-45d5-a48a-750a95229995.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK\n[button] Category -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.886, 0.408, 0.977, 0.423] in the image\nAnd my action is Action: SELECT\nValue: Most recent"}]}, {"id": "mind2web_6990", "image": {"bytes": "", "path": "./images/67f9fb2d-443a-4e5b-9530-e24372faedd2_62d943fd-4365-4704-9f70-94cb2619c702.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the exotic vehicles they have available to rent?\nPrevious actions:\n[button] Vehicles -> CLICK\n[link] Exotic Cars -> CLICK\n[link] Explore Vehicles -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.541, 0.139, 0.577] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6991", "image": {"bytes": "", "path": "./images/e12f51f6-c8c5-4a68-9586-d77927bca79c_8aaf0965-49ce-4370-9a67-300ef0a9123f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a Tesla Shop Gift Card for April May with the email april.may@gmail.com to my cart\nPrevious actions:\n[link] Shop -> CLICK\n[menuitem] Lifestyle -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.037, 0.478, 0.315, 0.836] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6992", "image": {"bytes": "", "path": "./images/8888ebb8-c2b3-4173-9718-51d752b9e5ab_5cf2cb2e-ee55-47fe-8fee-f18dbe96fb3c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest fare for JFK, NY to Heathrow, London and nearby airports for 1 adult on April 22, one way\nPrevious actions:\n[textbox] Depart , required. -> CLICK\n[textbox] Depart , required. 
-> TYPE: 04/22/2023\n[combobox] Number of passengers -> SELECT: 1\n[combobox] Passenger 1 -> SELECT: Adult (16-64)\n[combobox] Search by -> SELECT: Lowest fare"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.71, 0.688, 0.895, 0.71] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6993", "image": {"bytes": "", "path": "./images/4596152e-6589-4bf2-9c9f-dd3ed9e8c0dc_86c7afdc-89d2-4a50-8f67-18f069d328f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find out details about cancellation fees.\nPrevious actions:\n[link] FAQs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.07, 0.292, 0.328, 0.342] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6994", "image": {"bytes": "", "path": "./images/4d73937b-3be7-4f5f-950f-b1905244a2ac_8f0334e0-1f8a-4958-9416-68b2d03744a1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show MLB tickets for this weekend and select the next one.\nPrevious actions:\n[button] SPORTS -> HOVER\n[link] All MLB Tickets -> CLICK\n[button] All dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.724, 0.227, 0.881, 0.239] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6995", "image": {"bytes": "", "path": "./images/329d9ee8-de96-40c2-aa19-2dcf8e86b724_66a067aa-db40-45b7-bf6a-a4ba43889d2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get me info about planning a wedding cruise\nPrevious actions:\n[link] Weddings & Occasions -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.721, 0.314, 0.923, 0.33] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6996", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_79305d4e-54ba-42af-8bb6-7ae0e8aa483c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: orlando\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.124, 0.902, 0.162] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_6997", "image": {"bytes": "", "path": "./images/40cd58cd-6c9a-47b9-a927-92243970d87a_29af2ad7-b801-42cd-93d0-f2c973573ee3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find monthly daytime only parking nearest to Madison Square Garden starting from April 22\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: madison square garden\n[li] Madison Square Garden, Pennsylvania Plaza, New Yor... 
-> CLICK\n[select] All Parking Options -> SELECT: Daytime Only Parking"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.256, 0.384, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6998", "image": {"bytes": "", "path": "./images/f9062def-8361-48df-849c-4d7dbc1e11d1_62d7062f-27a8-4e87-b201-1aebf4117985.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: find trade in value for PS4 console\nPrevious actions:\n[searchbox] Find values for games and more -> CLICK\n[searchbox] Find values for games and more -> TYPE: PS4\n[button] SEARCH -> CLICK\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles & Hardware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.184, 0.375, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_6999", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_7466e760-1596-4515-be42-00af9e90ecb1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] What type of deals? -> CLICK\n[div] Vacations -> CLICK\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Europe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.173, 0.89, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7000", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_0d6383fc-e3a0-4402-ace2-f80e4c686a24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight from JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.128, 0.267, 0.141] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7001", "image": {"bytes": "", "path": "./images/b770af80-75f0-470c-8797-1466d90c35ce_1a8b410e-a1b9-42cb-9ac0-fa9515cf6140.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest last-minute pet-friendly road trip hotel deal in May for a single person with a pet and book.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.068, 0.492, 0.097] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7002", "image": {"bytes": "", "path": "./images/e2adf8f1-547d-4671-96c1-4a21a56e135d_2bf233d9-c3f4-4d32-8f7c-fbfa4c810e2a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View the upcoming schedule from Otis St@Summer St to City Point of the transit near South Station for today.\nPrevious actions:\n[tab] Schedules -> CLICK\n[span] Transit Near Me -> CLICK\n[input] -> TYPE: South Station"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.257, 0.562, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7003", "image": {"bytes": "", "path": "./images/8f4a020d-ac81-4d20-ae3a-02808faaba4e_b3159b0c-489d-49a5-86d8-e614669b3b88.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a birthday party package for a group of 10 at Arizona park with my address details. Group name is Neo, group leader's name is Thomas Anderson, birthday is May 5th, 1995. The phone number is2526545258. The email address is thomas.neo@gmail.com and address is po box 2846, Texas city in the state of Texas.\nPrevious actions:\n[button] Next -> CLICK\n[input] -> TYPE: Neo\n[input] -> TYPE: Thomas\n[input] -> TYPE: 05/05/1995\n[input] -> TYPE: Texas city"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.213, 0.185, 0.487, 0.211] in the image\nAnd my action is Action: TYPE\nValue: 252-654-5258"}]}, {"id": "mind2web_7004", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_fe3572a1-398a-479d-ba14-aa4ce84f34cf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.22, 0.273, 0.254] in the image\nAnd my action is Action: TYPE\nValue: 10001"}]}, {"id": "mind2web_7005", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_bee75aa3-6f7d-4626-be6f-1b217ac16733.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[button] Update search -> CLICK\n[button] Edit cuisine type filter -> CLICK\n[checkbox] Pizza -> CLICK\n[button] Submit -> CLICK\n[button] Open additional search filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.287, 0.776, 0.713, 0.798] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7006", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_1579fed0-00b7-47db-bfb3-7098175a0ebd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[button] Search -> CLICK\n[combobox] Sort by -> SELECT: Low to High\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.18, 0.331, 0.19] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7007", "image": {"bytes": "", "path": "./images/56cfe20a-f008-48d3-b683-002ce7790616_ca5a6e89-1bbb-4600-89e3-030ef9d18217.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest luxury car rental in New York for two with pickup from JFK airport at 9am, April 3 and drop off at JFK on 6pm, April 6\nPrevious actions:\n[tab] Cars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.23, 0.495, 0.27] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7008", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_6d149fca-3072-4909-90ba-487c98b599cd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK\n[link] Group Travel for Students -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.206, 0.336, 0.244] in the image\nAnd my action is Action: TYPE\nValue: washington"}]}, {"id": "mind2web_7009", "image": {"bytes": "", "path": "./images/6e98e331-c80b-4316-b332-af76c4cb2440_ed4fbad4-bbb8-48a2-98b6-3f0b9cf383ee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for women's golf polos in m size, priced between 50 to 75 dollars, and save the lowest priced among results.\nPrevious actions:\n[menuitem] Golf -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Polos -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.345, 0.233, 0.38] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7010", "image": {"bytes": "", "path": "./images/aab91310-0100-4a40-98e9-720c53199bff_43686440-2fc6-402a-baf6-13907700d8c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find mens hiking shoes under $100 in size 10 that has biggest discount.\nPrevious actions:\n[textbox] Search by keyword or web id -> TYPE: mens hiking shoes"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.168, 0.042, 0.187, 0.057] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7011", "image": {"bytes": "", "path": "./images/a11022ab-f733-4295-a2f2-0da19cccc1b4_a504d03f-205a-4905-8823-9493469d0034.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sign up for emails using the name Dick Smith and the email smith@gmail.com, select Six Flags Magic Mountain as the Park of Preference,\nPrevious actions:\n[textbox] First Name -> TYPE: Dick\n[textbox] Last Name -> TYPE: Smith\n[textbox] Email -> TYPE: smith@gmail.com\n[combobox] Park of Preference -> SELECT: Six Flags Magic Mountain / Los Angeles, CA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.322, 0.794, 0.678, 0.826] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7012", "image": {"bytes": "", "path": "./images/5b37d2e1-cbf7-4465-b28b-b63b0754895b_0e970a9f-f965-4bcd-8555-8f396193105e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a cheapest train ticket in upstairs deck and at least one window seat from Paris to Milan on March 26, departing after noon for 16 year old and 45 year old.\nPrevious actions:\n[combobox] Departure station, none selected. Select a station... -> TYPE: PARIS\n[span] Paris -> CLICK\n[combobox] Arrival station, none selected. Select a station. -> TYPE: MILAN"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.114, 0.139, 0.326, 0.161] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7013", "image": {"bytes": "", "path": "./images/6803cd71-78a9-4bff-a1a3-1153dcc28c11_d118ec61-bac9-44db-bf4d-acc261383072.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find organic dog food and add it to the cart.\nPrevious actions:\n[searchbox] Search: suggestions appear below -> TYPE: organic dog food\n[link] organic dog food -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.763, 0.846, 0.945, 0.877] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7014", "image": {"bytes": "", "path": "./images/c64dcaa1-555d-43f3-adb4-3452c4e5d09b_fd745a55-eadc-4aff-a3b3-fd9c98aafbb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse barbershops that have private lot parking.\nPrevious actions:\n[textbox] Find -> TYPE: barbershop\n[span] Barbershop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.034, 0.123, 0.084, 0.139] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7015", "image": {"bytes": "", "path": "./images/c5d1ecbe-f7a6-4d27-9e03-08dcfdcb524f_ba1b3830-c493-48da-9c25-87c05df40afb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a flight + cruise for two flying from Atlanta in October for vacation starting on October 8 for 6 nights with Miami as the departure port, choose the cheapest flight, hotel, and room in the cruise for booking.\nPrevious actions:\n[combobox] Departing from -> TYPE: ATLANTA\n[span] Atlanta, GA (ATL) -> CLICK\n[span] Jun 2023 -> CLICK\n[option] Oct 2023 -> CLICK\n[button] Search flights + cruise -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.065, 0.555, 0.31, 0.566] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7016", "image": {"bytes": "", "path": "./images/19847108-680f-4021-83a9-2548fab75fac_609ac7e9-480b-4b27-bfb7-6cecf26afdb5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Mark a review in the best rated activity to do in Paris in May 12, 2023 as helpful\nPrevious actions:\n[button] Next -> CLICK\n[gridcell] Fri May 12 2023 -> CLICK\n[circle] -> CLICK\n[svg] -> CLICK\n[span] Traveler Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.503, 0.278, 0.565, 0.288] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7017", "image": {"bytes": "", "path": "./images/39358d9c-6db2-4662-a91e-47a416eeacf7_765485e9-a5cf-4af2-b2b7-e1810fd891a9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See what deals there are for Dish Outdoor.\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.174, 0.266, 0.201] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7018", "image": {"bytes": "", "path": "./images/40bf7303-ba39-44fe-b73f-ad3c9743c579_b9e37de8-55fb-4bdf-9bd3-a9fd20adb92d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 19-inch stainless steel kitchen sink with two equal bowls that could be shipped to my home and compare the top two cheapest.\nPrevious actions:\n[link] Kitchen Sinks -> CLICK\n[link] Drop In Two Bowl -> CLICK\n[checkbox] Ship to Home Eligible (97) -> CLICK\n[checkbox] Stainless Steel (75) -> CLICK\n[checkbox] Equal Bowl (69) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.028, 0.716, 0.076, 0.734] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7019", "image": {"bytes": "", "path": "./images/44bde32f-12bd-4c70-a1a5-37bdadfe872b_23f2c136-1524-48cd-a3cf-e66581e35dad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find target branded sprinkles for easter baking\nPrevious actions:\n[link] Categories -> CLICK\n[span] Grocery -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.066, 0.209, 0.316, 0.234] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7020", "image": {"bytes": "", "path": "./images/edf748d4-07cd-4f0b-aad3-01baebbd557b_9a1f3e01-87cc-45c1-bb32-f90ce0bc5eed.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest available hotel for a three night stay from 1st June in Jakarta. The guest is named Joe Bloggs with the email address of buckeye.foobar@gmail.com and phone number of 11111111111. Billing address is in New York, zip code 10001.\nPrevious actions:\n[button] Yes, I agree -> CLICK\n[span] Lowest price -> CLICK\n[button] Choose room -> CLICK\n[button] Book now -> CLICK\n[textbox] First name -> TYPE: Joe"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.145, 0.95, 0.178] in the image\nAnd my action is Action: TYPE\nValue: Bloggs"}]}, {"id": "mind2web_7021", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_6dbd8788-9384-4c38-be4c-9511cdae63f7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flight between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK\n[textbox] Destination -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.309, 0.619, 0.328] in the image\nAnd my action is Action: TYPE\nValue: EWR"}]}, {"id": "mind2web_7022", "image": {"bytes": "", "path": "./images/0fd460cc-679c-4d04-8816-c6460bdf895a_d469455a-3a83-4df8-a461-ebe480791b9f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Confirm my vip tour at the six flags Discovery Kingdom\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Discovery Kingdom -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, Tours \uf078 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.307, 0.2, 0.493, 0.212] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7023", "image": {"bytes": "", "path": "./images/ba5e0124-b1c0-4116-968c-6b31a4c9c0a7_83363272-ba05-42ae-b732-707f2ceeecf1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cheapest bus for 2 persons from New York to Washington, DC.\nPrevious actions:\n[textbox] To (type text or use the arrow keys) -> TYPE: Washington\n[span] Washington, DC -> CLICK\n[span] -> CLICK\n[button] Find tickets -> CLICK\n[combobox] Sort: -> SELECT: Price"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.207, 0.397, 0.793, 0.468] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7024", "image": {"bytes": "", "path": "./images/4272e233-3440-4572-bd86-b3a2b22a4061_c42d4ac2-8268-4a2e-95c1-399ab2e7ae1f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most popular tours and attractions in Kyoto, Japan from May 25 with free cancellation option, check availability for 2 adults and 1 infant, and book.\nPrevious actions:\n[checkbox] 25 May 2023 -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[label] Most popular -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.382, 0.239, 0.92, 0.417] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7025", "image": {"bytes": "", "path": "./images/a065d3cb-e588-437b-b2e5-38359e770014_0ab53b01-e6f9-417c-87e4-bde4e5ba5393.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find cruises to Hawaii departing from Los Angeles in January 2024 with a duration of 12 days.\nPrevious actions:\n[button] SAIL TO -> CLICK\n[button] Hawaii -> CLICK\n[button] SAIL FROM -> CLICK\n[button] Los Angeles, CA -> CLICK\n[button] DATES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.389, 0.194, 0.441, 0.22] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7026", "image": {"bytes": "", "path": "./images/618dbd1f-16e6-442e-ba3b-364c6dbda810_d75139cd-0143-414a-83ba-e2fdd4372c5d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to limited-time offers and choose the cheapest outdoor dining table, add to the cart, and checkout as a guest and select Ikea West Chester store for pick-up.\nPrevious actions:\n[button] + 11 more -> CLICK\n[button] Furniture sets 138 -> CLICK\n[button] Outdoor dining sets 46 -> CLICK\n[button] Add to bag, T\u00c4RN\u00d6, Table+2 chairs, outdoor -> CLICK\n[link] Shopping bag, 1 items -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.686, 0.266, 0.969, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7027", "image": {"bytes": "", "path": "./images/a6fc427d-c2fd-4f1b-8f7d-8bb8647e63e9_e0567d5e-e397-4a09-bfb2-83578e05ae26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find deals in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.288, 0.0, 0.417, 0.027] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7028", "image": {"bytes": "", "path": "./images/30d5f7dc-3650-4177-93bc-b59f1f5621f0_a14c70a0-22cb-4218-9f3f-281c20bcfd0a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: List pg-13 comedy movies currently in theatres with the highest audience scores\nPrevious actions:\n[select-label] Audience score (highest) -> CLICK\n[span] Genre -> CLICK\n[div] -> CLICK\n[button] APPLY -> CLICK\n[span] Rating -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.399, 0.243, 0.645, 0.253] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7029", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_d229d3ba-1804-4ac9-ab0f-8fff81657d28.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[link] Careers -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.3, 0.317, 0.479, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7030", "image": {"bytes": "", "path": "./images/5092fad7-ff8a-481e-bb0b-fe83590193ce_206ec9bd-b2fe-4964-8dcb-c593a923ad7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show reviews of top critics for lowest rated Tom Hanks work\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.239, 0.169, 0.525, 0.189] in the image\nAnd my action is Action: TYPE\nValue: Tom Hanks"}]}, {"id": "mind2web_7031", "image": {"bytes": "", "path": "./images/4b2bb9e4-8e28-490b-895f-80da5e7e2815_fc909f7d-4360-4b36-8cb3-086b4a086b5a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get email alerts for a flight from BHZ to EWN leaving on april 30 on the email lin.lon@gmail.com\nPrevious actions:\n[link] Create flight status notification -> CLICK\n[textbox] From , required. -> TYPE: bhz\n[a] BHZ - Belo Horizonte, Brazil -> CLICK\n[textbox] To , required. -> TYPE: ewn\n[a] EWN - Coastal Carolina Regional, NC -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.673, 0.217, 0.687, 0.236] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7032", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_5728ce0c-5baf-4b2c-98c4-dac3a0343b10.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[link] Products -> CLICK\n[button] Kitchenware & tableware -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.261, 0.605, 0.281] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7033", "image": {"bytes": "", "path": "./images/5199e802-2fce-448d-8859-3cdf57b8dada_d2462cfe-1b26-4571-be84-5f838fdcbd5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the list of ballet event for the next 30 days.\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: Ballet"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.275, 0.871, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7034", "image": {"bytes": "", "path": "./images/904bd858-1c40-4d22-9a5b-e2974c3b5a7e_1245d53c-a9f4-4a43-b386-dfbdf4e4aed6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most expensive ticket for New York Knicks game on April 9\nPrevious actions:\n[textbox] Search for artists, teams or venues... -> CLICK\n[textbox] Search for artists, teams or venues... -> TYPE: New york knicks\n[option] New York Knicks -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.864, 0.32, 0.941, 0.336] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7035", "image": {"bytes": "", "path": "./images/45ae95ac-e539-4314-a0ca-8947b1843890_b30b7141-b970-418b-ac3d-3069ae385e86.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a monthly parking with valet service at the New York University.\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: New York University\n[li] New York University, New York, NY, USA -> CLICK\n[button] Filter -> CLICK\n[checkbox] Valet (15) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.488, 0.458, 0.544] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7036", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_b444233b-12da-405d-b489-b08e50eeecc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lansing\n[list] KOA Logo Icon Auburn Hills / Holly KOA Holly, MI K... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.436, 0.248, 0.547, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7037", "image": {"bytes": "", "path": "./images/e832e1f9-3a9d-440e-a96f-8cbbf241e4af_e9b98033-42d1-478a-ba2b-e7e73105a6f5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Sell my single-key black car, which has no defect with a leather interior through Louisiana, license plate number YAW639 in zip 70726 with mileage of 222900, get the offer at my email sellmycar@mail.com.\nPrevious actions:\n[button] No -> CLICK\n[button] No -> CLICK\n[radio] Owned -> CLICK\n[button] No -> CLICK\n[button] No -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.802, 0.284, 0.828] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7038", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_76fd2922-8aaf-4d4b-9266-d53fa5daf0de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Show more filters modal -> CLICK\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.226, 0.966, 0.265] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7039", "image": {"bytes": "", "path": "./images/4f14fb44-31d8-49f3-b5fb-95826175bcb8_aa0d6885-065b-4dfe-8471-916d90dc4f57.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search new books from Jk Rowling available for kids within the age from 3 to 5 that are below $20\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.212, 0.042, 0.746, 0.061] in the image\nAnd my action is Action: TYPE\nValue: Jk rowling"}]}, {"id": "mind2web_7040", "image": {"bytes": "", "path": "./images/6c28458c-ba2a-41a5-b5c2-948f2954c25b_2e5b9bce-a220-413b-bf5b-eb0cc86e8fef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest travel time flight for 1 traveler from JFK, NYC to Heathrow, London on April 19, then from Heathrow, London to CDG, Paris on April 21 and then from CDG, Paris to JFK, NYC on April 23\nPrevious actions:\n[textbox] To , required. -> TYPE: CDG\n[a] CDG - Paris Charles de Gaulle, France -> CLICK\n[textbox] Depart , required. -> TYPE: 04/21/2023\n[button] Add another flight -> CLICK\n[textbox] From , required. 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.106, 0.408, 0.29, 0.423] in the image\nAnd my action is Action: TYPE\nValue: CDG"}]}, {"id": "mind2web_7041", "image": {"bytes": "", "path": "./images/09675529-c12d-42dc-a260-c1e046f87256_36eaf5a7-66ea-447c-87c7-8db65126fffa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Trade in a Nemo Front Porch 2P Tent via mail with my membership info Joe Bloggs, phone number 123456789\nPrevious actions:\n[combobox] search input -> TYPE: Nemo Front Porch 2P Tent\n[img] search icon -> CLICK\n[button] Trade in -> CLICK\n[link] Complete trade-in -> CLICK\n[button] Verify membership to continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.396, 0.192, 0.438] in the image\nAnd my action is Action: TYPE\nValue: Joe"}]}, {"id": "mind2web_7042", "image": {"bytes": "", "path": "./images/81835704-aebc-4600-abd4-02102509fda5_5c53350f-286b-4be8-b37e-346ce0772af0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a premium 4-door car with automatic transmission and unlimited mileage at Heathrow Airport for pickup, on date April 26, 2 pm, and drop off at the same Airport on April 30, 1 pm, book a range rover if available with full protection.\nPrevious actions:\n[div] London Heathrow Airport (LHR) -> CLICK\n[div] Wed, Apr 19 -> CLICK\n[checkbox] 26 April 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 2:00 PM\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.649, 0.155, 0.753, 0.203] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7043", "image": {"bytes": "", "path": "./images/8b130a70-f03a-4f86-8f2a-44f1b44f3655_931b38e6-e860-43fa-9d36-5b864e1ff95b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check my trip for Smith and booking number X899987799.\nPrevious actions:\n[link] Manage trips / Check-in -> CLICK\n[textbox] Passenger last name , required. -> TYPE: Smith\n[textbox] Confirmation / Record locator , required. -> TYPE: X899987799"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.299, 0.875, 0.323] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7044", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_2bc8e547-5166-4076-90bc-4c1d37ee725b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.122, 0.562, 0.2, 0.587] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7045", "image": {"bytes": "", "path": "./images/e72f9c32-4633-4fcf-abd7-b7cc4a9aaba3_3bb8a4f7-e32f-4613-ba9a-f72be20a839b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find similar artists to the Weekend.\nPrevious actions:\n[link] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.0, 0.906, 0.028] in the image\nAnd my action is Action: TYPE\nValue: the weeknd"}]}, {"id": "mind2web_7046", "image": {"bytes": "", "path": "./images/dfa415d9-efb2-4477-bc33-672f02d3399d_dff4e313-9134-4041-a303-6eb0720df8be.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a low-priced small air-conditioned car on rent in Mexico City Airport for under 100 dollars and book without insurance for next day.\nPrevious actions:\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[div] View deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.324, 0.862, 0.475, 0.918] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7047", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_40f1da67-fce1-4c14-9e24-9b3f57fe90cb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: berlin"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.074, 0.198, 0.366, 0.224] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7048", "image": {"bytes": "", "path": "./images/55631305-2957-4343-b4f7-73aa68acba47_46d1aec9-efcf-40c7-bfeb-13d5c0db36c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two tickets for the Boston Red Sox vs. New York Yankees game on August 18th, with a total cost of no more than $200.\nPrevious actions:\n[button] Search -> CLICK\n[link] TICKETS -> CLICK\n[span] -> CLICK\n[label] 2 -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.796, 0.221, 0.837, 0.245] in the image\nAnd my action is Action: TYPE\nValue: 200"}]}, {"id": "mind2web_7049", "image": {"bytes": "", "path": "./images/a39f20a6-4d31-4f5f-b0ac-be8d548d7fa5_513ceb8e-6771-4ee4-850a-2aabe2c17e0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the hot deals within 500 miles from 10001\nPrevious actions:\n[link] DEALS \uf0d7 -> CLICK\n[link] HOT DEALS \uf0da -> CLICK\n[textbox] City, State or Zip -> TYPE: 10001\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.62, 0.094, 0.717, 0.113] in the image\nAnd my action is Action: SELECT\nValue: 500 Miles"}]}, {"id": "mind2web_7050", "image": {"bytes": "", "path": "./images/cf8b2846-ac33-46aa-887c-174de6184057_360a4b82-5666-4d87-8b10-2ea3b37f78ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show status of my order with order number X123456789 and email buckeye.foobar@gmail.com\nPrevious actions:\n[link] \ud83d\ude9aOrder Status -> CLICK\n[textbox] Order number * -> TYPE: X123456789\n[textbox] E-mail used on order * -> TYPE: buckeye.foobar@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.657, 0.418, 0.766, 0.459] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7051", "image": {"bytes": "", "path": "./images/bc45669b-6bed-4240-9d9b-6b5e45a7e6f3_6ec19c62-07fe-42e9-99da-b36682d1ab92.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse xbox series x consoles that are used and between $200 and $400.\nPrevious actions:\n[combobox] Search for anything -> TYPE: xbox series x console\n[button] Search -> CLICK\n[input] -> CLICK\n[textbox] Minimum Value in $ -> TYPE: 200\n[textbox] Maximum Value in $ -> TYPE: 400"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.151, 0.656, 0.176, 0.678] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7052", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_716b1d6d-07d1-4ace-b6af-d1fa67a344c0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK\n[link] Complementary Medicine -> CLICK\n[select] All -> SELECT: Audio (376)\n[select] All -> SELECT: Under US$20"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.34, 0.196, 0.365] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7053", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_68d7bbb8-fe62-4300-9f0e-cc06b85a6552.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Distance & Shipping -> CLICK\n[button] Nationwide -> CLICK\n[menuitem] 100 miles -> CLICK\n[input] -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.196, 0.249, 0.218] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7054", "image": {"bytes": "", "path": "./images/bafd6a44-5938-431f-8e2e-17d680d5c48b_76adfe82-7943-40af-9121-513e8de299ef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the page with information about Wi-Fi subscriptions.\nPrevious actions:\n[tab] TRAVEL INFO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.275, 0.17, 0.348, 0.181] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7055", "image": {"bytes": "", "path": "./images/ff82e848-f29e-477b-a286-c807cbd0d8fa_2c87ab6d-07c9-414a-856c-558889e7cd0d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a job in customer services in Qatar Airways, find any job available in India, view the details and save the job.\nPrevious actions:\n[textbox] Location -> TYPE: india\n[link] Mumbai - BOM, India - IN 2 jobs -> CLICK\n[button] Search -> CLICK\n[span] -> CLICK\n[span] Customer Experience- Customer Services Agent (Cont... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.715, 0.246, 0.844, 0.296] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7056", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_a433db85-74f7-4865-aef9-fcf866f4a035.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia\n[button] Submit -> CLICK\n[span] Select store -> CLICK\n[button] Show more filters modal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.075, 0.969, 0.086] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7057", "image": {"bytes": "", "path": "./images/e344b89e-767a-4618-ba82-4b81cdcba280_e9b55aac-5518-472f-b369-9fd23371f29f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: add a motherboard under $200 and a compatible processor at any price to the shopping cart.\nPrevious actions:\n[textbox] price to -> TYPE: 200\n[button] APPLY -> CLICK\n[link] GIGABYTE B450M DS3H WIFI AM4 AMD B450 SATA 6Gb/s M... -> CLICK\n[a] Processors - Desktops -> CLICK\n[div] AMD Ryzen 5 5600 - Ryzen 5 5000 Series Vermeer (Ze... -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.502, 0.693, 0.689, 0.717] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7058", "image": {"bytes": "", "path": "./images/227054d0-24f1-4f8a-9dcb-5146fac623f4_71990241-9b93-43ab-8200-e6ea3063bca2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Samsung-made and larger than 11-inch screen Android tablet, in the price range of 200 to 300 dollars and add it to the cart.\nPrevious actions:\n[link] \ue92e Electronics \uf105 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.859, 0.304, 0.902, 0.312] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7059", "image": {"bytes": "", "path": "./images/29f47ddb-3d25-4cfb-b1d6-d3987e998a13_b82ef559-4765-4aea-9ff7-59980a30a227.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the movie \"65\" that is digital at the AMC Grove City 14 showing at 7:30 pm on Tuesday, March 28, 2023 and buy four tickets for seats J10 through J7\nPrevious actions:\n[select] AMC Grove City 14 -> SELECT: AMC Grove City 14"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.455, 0.049, 0.633, 0.096] in the image\nAnd my action is Action: SELECT\nValue: 65"}]}, {"id": "mind2web_7060", "image": {"bytes": "", "path": "./images/f863168b-1d7e-4f51-9681-79891abc4b45_7872a569-29b2-44ff-9e06-811c8577edff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in the National Park System that has a Junior Ranger Program for kids.\nPrevious actions:\n[link] FIND A PARK -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.44, 0.163, 0.56, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7061", "image": {"bytes": "", "path": "./images/c3e841d5-4624-44cb-a8b5-897b9aa3ef9b_85e3c094-4c46-499f-90fa-05b2a66d9a39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a single day pass to Six Flags, Magic Mountain.\nPrevious actions:\n[button] Browse the Parks Below -> CLICK\n[span] Six Flags Magic Mountain -> CLICK\n[button] Go! -> CLICK\n[link] Tickets, Passes, VIP Tours \uf078 -> CLICK\n[link] Tickets & Passes -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.456, 0.241, 0.465] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7062", "image": {"bytes": "", "path": "./images/eb609e15-ff10-4b3d-82ce-348c439548ca_5caef1d1-97f5-4407-b1f3-5cbfc6655121.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Shop for UA outlet clothing and find a outdoor sports jacket in size ymd.\nPrevious actions:\n[menuitem] Outlet -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.075, 0.342, 0.097, 0.36] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7063", "image": {"bytes": "", "path": "./images/87d1206c-a16b-4816-9c1e-6e1a04f30bf6_904633a2-814a-4074-830e-bf4096bc461a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add games with the title Atlantis to my collection.\nPrevious actions:\n[combobox] Search -> TYPE: Atlantis\n[svg] -> CLICK\n[link] Atlantis -> CLICK\n[button] \uf168 Add To Collection -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.594, 0.254, 0.638, 0.269] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7064", "image": {"bytes": "", "path": "./images/844f8d77-e12e-4d68-8c4c-9146bd5b1530_24eb707a-5e41-4be7-8b4f-b7d2233b07e0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open gift list for beauty products.\nPrevious actions:\n[path] -> CLICK\n[button] More ways to shop -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.25, 0.052, 0.5, 0.075] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7065", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_0c1dc335-6547-4426-bfed-610421e2c194.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Rent A Car -> CLICK\n[button] Pick-up -> TYPE: Brooklyn"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.202, 0.495, 0.235] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7066", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_93aaf6df-5228-4992-b532-9613a18117d1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.121, 0.35, 0.142] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7067", "image": {"bytes": "", "path": "./images/4ee7d5e1-50be-49d2-8af1-83b3da0519ad_5cb18345-d17d-4d6a-9db8-dbfe0d3cd3bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show the details for a monthly parking option in Ney York City near Street Taco to start on may 6 that i can self park\nPrevious actions:\n[tab] Monthly -> CLICK\n[textbox] Search for parking -> TYPE: street taco"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.078, 0.194, 0.83, 0.208] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7068", "image": {"bytes": "", "path": "./images/e6bdb364-4327-44f0-a6d8-94eb00b36ca7_ec2892e2-3184-4086-bef5-33ba043db515.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Go to the page with help in choosing a solar energy product for homes.\nPrevious actions:\n[link] Solar Panels -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.639, 0.579, 0.798, 0.689] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7069", "image": {"bytes": "", "path": "./images/8bfeeb54-beb9-4271-9436-fbd1a705efcd_067334cd-e49b-45ac-8a32-31cdec47b52b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest 2017 Honda Civic within 100 miles of 07470 which has a sun roof and black exterior color while excluding out of market vehicles\nPrevious actions:\n[button] Make -> CLICK\n[listitem] Honda (116) Honda (116) -> CLICK\n[button] Back to all categories -> CLICK\n[button] Model -> CLICK\n[listitem] Civic (35) Civic (35) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.154, 0.249, 0.173] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7070", "image": {"bytes": "", "path": "./images/2ef75333-ca40-454f-a65c-88b6b60e2497_f35539ff-43bf-48f6-af52-483fc39a7cc8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: book a tour to the statue of liberty for 3 adult on march 27th\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.395, 0.098, 0.43, 0.114] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7071", "image": {"bytes": "", "path": "./images/f4bad8a9-72c9-4237-83fa-5673a0e139e9_de5d2cb5-2deb-4b0f-817c-5a1f3d8f6b1a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a table for 2 at a restaurant that was featured on the latest Hit List\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.833, 0.143, 0.988, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7072", "image": {"bytes": "", "path": "./images/ad0369b6-cfd5-4555-bb4f-d84a942be555_d35d8236-d15b-4fde-b1a4-cb2250309b2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order the cheapest climbing shoes in today's deals.\nPrevious actions:\n[link] Climbing -> CLICK\n[combobox] Sort By -> SELECT: Price: Low - High\n[img] Black Diamond Zone Climbing Shoes 0 -> CLICK\n[button] Add to cart\u2014$46.73 -> CLICK\n[link] Cart & Checkout -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.671, 0.238, 0.956, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7073", "image": {"bytes": "", "path": "./images/8fb78266-8862-4608-9ff3-92f81e58b2ff_ecaed200-95a2-4e5a-b81d-7e4638985800.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest non-stop or 1 stop flight from JFK, New York to Changi, Singapore for 1 traveler on April 11 which includes seat selection and free cancellation\nPrevious actions:\n[tab] Flights -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.185, 0.092, 0.254, 0.107] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7074", "image": {"bytes": "", "path": "./images/e638beb6-059f-40ea-8653-929221e997db_f933ceb6-cb8e-401e-a15f-74121d8541ff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a comprehensive health screening for women in 10003.\nPrevious actions:\n[span] Schedule a women's health exam -> CLICK\n[link] Comprehensive health screening Available in-person -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> CLICK\n[textbox] Tell us a location Enter a ZIP code or city & stat... -> TYPE: 10003"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.352, 0.279, 0.514, 0.301] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7075", "image": {"bytes": "", "path": "./images/6e565708-43e2-492b-9f1d-25d51387dcf7_a9c6510b-8e65-41cb-b24e-eee23f722354.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find pizza serving restaurants for pick up in Chicago, sufficient for 10 people on March 30, 8:15 PM from a joint which also offers dining package.\nPrevious actions:\n[svg] -> CLICK\n[span] Pickup -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.098, 0.095, 0.248, 0.136] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7076", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_203ab1c5-86ca-4185-87ee-b74643fa9e97.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[gridcell] Sat, Apr 29, 2023 -> CLICK\n[div] 30 -> CLICK\n[label] Purpose of your event -> CLICK\n[li] Business -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.04, 0.607, 0.336, 0.64] in the image\nAnd my action is Action: TYPE\nValue: 1"}]}, {"id": "mind2web_7077", "image": {"bytes": "", "path": "./images/8d9e09e1-c31a-420b-ace8-079204db551a_d5f3fc99-7306-4bf6-8f6f-d0efa393636a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select a ticket for an event similar to professional boxing\nPrevious actions:\n[textbox] Search for artists, teams or venues... 
-> TYPE: professional boxing"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.165, 0.232, 0.392, 0.247] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7078", "image": {"bytes": "", "path": "./images/8368b990-c6ca-4cfe-a7ab-c2a88697639d_1463c3d8-4e6e-4c2f-897f-4ad740d598d6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Build a PC with Intel i9 Raptor Lake 24 cores, Asus ROG strix Z790 motherboard, Asus TUF Rtx 4090 and remaining all compatible products with SSD.\nPrevious actions:\n[button] \uf067 SELECT -> CLICK\n[link] SSD Storage -> CLICK\n[button] \uf067 SELECT -> CLICK\n[button] \uf067 SELECT -> CLICK\n[link] Fan & Heatsink CPU Cooler -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.809, 0.285, 0.868, 0.295] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7079", "image": {"bytes": "", "path": "./images/35646414-520b-4de0-90e0-5dfeafd850a3_9d601e7e-2e4f-42d0-a3d4-8e24b865fed4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a lowest priced ticket from Bristal airport to London Paddington on April 8 after 2 pm, seating must be forward facing window side with power socket.\nPrevious actions:\n[link] 8 -> CLICK\n[listbox] hour -> SELECT: 14\n[listbox] minutes -> SELECT: 00\n[button] Get cheapest tickets -> CLICK\n[span] Continue -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.081, 0.346, 0.256, 0.377] in the image\nAnd my action is Action: SELECT\nValue: Forward facing"}]}, {"id": "mind2web_7080", "image": {"bytes": "", "path": "./images/cdd64586-2f2e-4000-89fa-200feefcd97d_8183f7ea-b058-4050-9a78-7d016c2f1e14.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find newly arrived Batting Gloves for women\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.292, 0.362, 0.35, 0.399] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7081", "image": {"bytes": "", "path": "./images/5098c679-dacc-4abd-9331-18b898f936dd_f8551872-cb70-46ad-b3d7-435fef6cf6ea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: What are the St louis park policies regarding security?\nPrevious actions:\n[button] Browse the Parks Below -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.527, 0.283, 0.846, 0.318] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7082", "image": {"bytes": "", "path": "./images/725c38e7-7987-40b3-8f19-8450f8277f06_5c7395ff-ffb0-411f-a8a1-bd2d6f51d101.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest event to attend in Athens, Greece on April 2nd\nPrevious actions:\n[div] Events & Activities -> CLICK\n[input] -> CLICK\n[button] Greece -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.616, 0.211, 0.672, 0.225] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7083", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_9c650bd4-031f-4287-8751-f277c6861d52.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER\n[link] Medical -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.033, 0.153, 0.196, 0.166] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7084", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_f39d7ec8-50c8-4394-b48e-97e4a42e0dac.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK\n[button] Home d\u00e9cor -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.355, 0.201, 0.605, 0.215] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7085", "image": {"bytes": "", "path": "./images/913b902b-37b7-4230-a0f2-a19931b06951_61dce3f4-e036-403d-b3c3-bc956eb57807.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the fastest single stop flight fron JFK airport in New York to Male in Maldives on March 13\nPrevious actions:\n[textbox] Flight origin input -> TYPE: new york\n[div] John F Kennedy Intl -> CLICK\n[textbox] Flight destination input -> TYPE: male\n[span] Mal\u00e9, Maldives -> CLICK\n[button] Start date calendar input -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.359, 0.317, 0.391, 0.344] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7086", "image": {"bytes": "", "path": "./images/fd2494f3-fe7d-4c58-81e4-742c72b4e1f2_fd9fc151-a496-427a-ab5b-a46920baf5cc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the order status using an order number of24124124091. The email address is boobear@gmail.com\nPrevious actions:\n[link] Store -> HOVER\n[link] Order Status -> CLICK\n[textbox] Order number -> TYPE: 24124124091\n[textbox] Email address -> TYPE: boobear@gmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.65, 0.497, 0.872, 0.549] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7087", "image": {"bytes": "", "path": "./images/ab1ae27a-029c-4f0c-9e01-82715e08d518_860311ce-e09d-4946-99fa-8af11abde481.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the tables at Chianti - MG Road restaurant on March 30,1:30 PM for 4 Guests.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.021, 0.526, 0.048] in the image\nAnd my action is Action: TYPE\nValue: CHIANTI"}]}, {"id": "mind2web_7088", "image": {"bytes": "", "path": "./images/ce34bc61-e3d2-40c8-b02b-b149efc4b115_18a087b4-2e0a-4bb1-828e-6eabfe01b850.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a mini van at Brooklyn City from April 5th to April 8th for a 22 year old renter.\nPrevious actions:\n[button] Selected Pick-Up Date 03/19/2023 -> CLICK\n[button] 04/05/2023 -> CLICK\n[svg] -> CLICK\n[button] 04/08/2023 -> CLICK\n[combobox] Renter Age -> SELECT: 22"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.653, 0.32, 0.84, 0.345] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7089", "image": {"bytes": "", "path": "./images/1bb8d48f-dac7-472d-b2b2-651ab184baf2_aa73fdd1-6124-47ae-b9a4-922e2cf5c1b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for a hybrid job at Resy in software development, if the job is open apply immediately.\nPrevious actions:\n[link] Careers -> CLICK\n[link] View All Open Positions -> CLICK\n[span] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.693, 0.301, 0.796, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7090", "image": {"bytes": "", "path": "./images/31df4f0d-8852-4694-b6cf-5f337bd26d39_66f6a6e6-f08f-40b2-95a6-0ad325c3aa2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Retrieve booking 1234567890 booked by Jason Smith\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.252, 0.293, 0.479, 0.327] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7091", "image": {"bytes": "", "path": "./images/0fb36232-0d2a-4115-9012-99dbaad25a5e_516711a6-50e7-4f43-a97a-38b6442f384d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find one-way flights from New York to Toronto.\nPrevious actions:\n[span] -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: New York\n[button] New York, NY, US (NYC - All Airports) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.18, 0.481, 0.204] in the image\nAnd my action is Action: TYPE\nValue: Toronto"}]}, {"id": "mind2web_7092", "image": {"bytes": "", "path": "./images/c731e6d1-7194-4d16-a6a8-f5fc9be56a55_d39863cc-1c9b-4c24-bccf-c7f95e0ade4b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy a pop rock album CD from the United Kingdom that was released in 2016, is between \u00a315 and \u00a320 and in perfect condition.\nPrevious actions:\n[button] Marketplace -> CLICK\n[link] Pop Rock -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.019, 0.118, 0.163, 0.13] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7093", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_fcd0a544-caa9-4349-bbaa-c79b998d2979.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[button] Plan -> CLICK\n[link] Fare Finder -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.138, 0.262, 0.342, 0.287] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7094", "image": {"bytes": "", "path": "./images/78c52592-76e4-4c45-afd5-f94cf213314e_9149d122-ada8-4f08-98c1-30557c30f762.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play a star wars movie trailer.\nPrevious actions:\n[textbox] Search IMDb -> TYPE: Star Wars"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.631, 0.007, 0.649, 0.016] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7095", "image": {"bytes": "", "path": "./images/8aedabbf-a7b4-4da7-8eaf-9e159a3ec99b_6aa2eb8f-0a6a-4844-8aa0-f1a9e66a2deb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: review the dinner menu of La Bergamote restaurant in Hell's Kitchen.\nPrevious actions:\n[i] -> CLICK\n[textbox] Search by restaurant name, cuisine, or anything be... -> TYPE: La Bergamote\n[button] Search -> CLICK\n[heading] La Bergamote \u2014 Midtown -> CLICK\n[link] Reservation -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.253, 0.285, 0.28, 0.297] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7096", "image": {"bytes": "", "path": "./images/fdc94e3a-caa3-42b0-aa1b-b99f728f6292_40c1286c-8c7e-4b32-b160-227c9e2ef1f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Schedule a demo drive for Model Y for Roy Adams with phone number 123-999-0000, email address RA@gmail.com and zip code 90001 in the United States.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.454, 0.808, 0.638, 0.842] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7097", "image": {"bytes": "", "path": "./images/9cd20dc6-d2a5-4e8f-9252-549f2c51413c_9dbba049-3068-4fc1-853d-a48205645473.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View inventory of Tesla Model S that are blue and new with 19\" wheels within 50 miles of zip code 60602.\nPrevious actions:\n[link] Model S -> CLICK\n[link] View Inventory -> CLICK\n[image] -> CLICK\n[checkbox] 19\" Wheels -> CLICK\n[textbox] Registration Zip Code Where you will register the ... -> TYPE: 60602"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.048, 0.313, 0.282, 0.343] in the image\nAnd my action is Action: SELECT\nValue: 50 miles"}]}, {"id": "mind2web_7098", "image": {"bytes": "", "path": "./images/6b54b029-bff8-49a7-acca-c57163f14279_24a30d96-2890-4243-9595-14ea4999444c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find music events in Los Angeles today\nPrevious actions:\n[div] Search for events -> CLICK\n[textbox] Search events -> TYPE: music\n[combobox] autocomplete -> TYPE: los angeles"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.13, 0.42, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7099", "image": {"bytes": "", "path": "./images/82f654ab-fbfe-43f2-a2fe-0001b2ac853f_24ad4f85-593a-4b4e-bbfc-ecec0c6f3e00.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights from New York City to London and filter the results to show only non-stop flights.\nPrevious actions:\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[path] -> CLICK\n[textbox] Flight origin input -> TYPE: New York City\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.514, 0.198, 0.702, 0.245] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7100", "image": {"bytes": "", "path": "./images/7c2ab1ed-54b7-4316-8b2f-d7467e01b540_ba706103-fef4-4462-9ee1-8c8022b3388b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the cheapest direct flight from Madurai to Chennai on 20/3/23 booking through Air India.com\nPrevious actions:\n[link] Search for flights -> CLICK\n[textbox] Flight origin input -> TYPE: madurai\n[div] Madurai, Tamil Nadu, India -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.101, 0.262, 0.106] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7101", "image": {"bytes": "", "path": "./images/e4f8a347-b288-4a00-9e6b-89cbccda42d2_2804c209-5ed1-40c8-9ed5-bc60068ae0fb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a parking near any airport in california starting april 20 and ending april 23\nPrevious actions:\n[link] Reserve Now -> CLICK\n[textbox] Select Start Date -> CLICK\n[gridcell] Thu Apr 20 2023 -> CLICK\n[textbox] Select End Date -> CLICK\n[gridcell] Sun Apr 23 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.644, 0.321, 0.844, 0.35] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7102", "image": {"bytes": "", "path": "./images/97e3f951-891d-4626-8dbb-ab6e39261d05_4eaaee20-45f8-42fd-8046-0020ea934869.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find first class flight for May 27 from JFK, New York to Heathrow, London for 2 adults, one 4 year old child and one infant under 1 year old who will sit on a lap.\nPrevious actions:\n[tab] Flights -> CLICK\n[tab] One-way -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.781, 0.108, 0.85, 0.127] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7103", "image": {"bytes": "", "path": "./images/408cc1bd-0a76-4bad-b5f4-11db4405047f_b0e6bcb2-ebc1-47f3-959c-3cba6751f827.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Plan a trip from Boston Logan Airport to North Station.\nPrevious actions:\n[tab] Trip Planner -> CLICK\n[combobox] From -> TYPE: Boston Logan Airport\n[option] Boston Logan Int'l Airport, 1 Harborside Dr, East ... -> CLICK\n[combobox] To\u00a0 -> TYPE: North Station\n[link] T orange line green line D green line E commuter ... 
-> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.129, 0.825, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7104", "image": {"bytes": "", "path": "./images/0b2f241e-8d1f-4f22-be2f-5722ceff23a3_4e1f470d-9dfd-4136-9785-360b584f0683.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a treehouse in India with free cancellation and below $100 per night.\nPrevious actions:\n[img] -> CLICK\n[button] Location Anywhere -> CLICK\n[textbox] Where -> TYPE: India \n[div] India -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.898, 0.04, 0.97, 0.061] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7105", "image": {"bytes": "", "path": "./images/af8a7016-67db-4a6d-ab61-04eeb1f244cb_0387a16c-0486-4263-97b5-a8e3145814bc.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a round trip from Phoenix to Miami with maximum budget of $2000.\nPrevious actions:\n[combobox] Flying from -> TYPE: Phoenix\n[button] Phoenix, AZ, US (PHX) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.68, 0.616, 0.813, 0.648] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7106", "image": {"bytes": "", "path": "./images/a51b649f-15b4-4ce4-936e-0b76efa0dd2d_0f407117-ae70-42ea-9230-41fca96353ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: View travel credits for the passenger Leon Sin with the record locator jcqnhd ans ticket number 1234567\nPrevious actions:\n[searchbox] Search AA.com\u00ae -> TYPE: travel credit"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.759, 0.003, 0.925, 0.026] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7107", "image": {"bytes": "", "path": "./images/c8990751-0aab-440c-bf6d-a32ac1216344_eef7607c-2b44-4939-8098-d82b207e60f2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find guided vacations in Europe for more than 10 nights in the month of July\nPrevious actions:\n[textbox] Where? -> TYPE: Europe\n[div] Europe -> CLICK\n[textbox] When? -> CLICK\n[tab] Select a Month -> CLICK\n[li] July -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.008, 0.82, 0.025] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7108", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_69b182b6-f5e4-4aa1-85d5-a98b88129a7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[combobox] Search by product -> TYPE: mirror\n[button] Search -> CLICK\n[span] Columbus -> CLICK\n[searchbox] Search by ZIP code or city, state -> TYPE: atlanta georgia"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.917, 0.065, 0.948, 0.082] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7109", "image": {"bytes": "", "path": "./images/112dba49-3ea4-49ce-9a7b-b47d82d3f81b_f5cb11a1-e04c-4d99-98f2-5c902d3c1283.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find top-rated outdoor activity events in Spain, that is happening on May 1 and book the tickets for two adults in morning group with an English guide.\nPrevious actions:\n[span] -> CLICK\n[span] 1 -> CLICK\n[div] Sort by -> CLICK\n[div] Top rated -> CLICK\n[link] Get tickets -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.457, 0.295, 0.614, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7110", "image": {"bytes": "", "path": "./images/e84111f2-5193-421e-877a-9af8418b558a_a1bb6c97-bc21-4cbe-ba5b-6a8d0e0536e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a gaming desktop with an rtx4000 GPU, available in stock.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.027, 0.096, 0.215, 0.118] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7111", "image": {"bytes": "", "path": "./images/44a12ff5-0172-444a-b979-f224162c1aa8_065612a0-3a86-4991-b06d-abb9ec4e1de3.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for cars to pick up at the laguardia airport in April 12th, 2023 midnight\n and return to same location at noon\nPrevious actions:\n[button] Find a Location -> CLICK\n[link] 1 Laguardia Airport (LGA)\u00a0 -> CLICK\n[combobox] Pick Up Time -> SELECT: midnight\n[textbox] Enter your pick-up location or zip code -> CLICK\n[combobox] Return Time -> SELECT: noon"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.109, 0.295, 0.5, 0.34] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7112", "image": {"bytes": "", "path": "./images/d538537c-ff96-4918-8807-af09b26199d1_778f35dc-5a7c-4835-a404-f057a5b4311b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a flights between SFO and EWR using the Boeing 787-9 aircraft\nPrevious actions:\n[textbox] Origin -> CLICK\n[textbox] Origin -> TYPE: SFO\n[div] San Francisco Int'l (San Francisco) - -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.494, 0.257, 0.619, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7113", "image": {"bytes": "", "path": "./images/f118238f-ef8f-4b63-9159-a81e981ef46e_3ed11a2a-8fd5-4f13-af2b-be976fd73a0c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add decorative LED Candles to the cart.\nPrevious actions:\n[link] Products -> CLICK\n[button] Lighting -> CLICK\n[link] Decorative lighting -> CLICK\n[img] LED candles -> CLICK\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.508, 0.938, 0.551] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7114", "image": {"bytes": "", "path": "./images/e92a2c89-3310-434b-9543-a0d896881bb9_ccced979-b0e9-4efc-997c-d53364206c7d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find premium car in Brooklyn for a day.\nPrevious actions:\n[tab] Travel Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.792, 0.154, 0.842, 0.169] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7115", "image": {"bytes": "", "path": "./images/ee9e993b-0254-465d-bb04-072e01e5f498_18cbba50-27fd-4d98-84c7-7b9802e028d5.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the selection of fishing reels that are power assisted and are in stock.\nPrevious actions:\n[link] Fishing -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.062, 0.181, 0.192, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7116", "image": {"bytes": "", "path": "./images/3e50b35f-7af9-4835-a741-158c2d941722_8838a017-236c-4418-8aa7-48ad6c0514bd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest room for Athens, Attica, Greece 1 Apr 2023 - 3 Apr 2023, 2 nights, 1 Room, 1 Adult\nPrevious actions:\n[generic] 1 -> CLICK\n[div] Choose date -> CLICK\n[generic] 3 -> CLICK\n[button] Search -> CLICK\n[button] Yes, I agree -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.051, 0.386, 0.074] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7117", "image": {"bytes": "", "path": "./images/fcb50c4e-a89e-4f18-b7c0-423295e7c298_9b1378d8-3623-4724-a26a-b493469ca55c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for a certified pre-owned front-wheel drive Jeep, Compass model car with single owner and the lowest mileage within 500 miles of zip 59316.\nPrevious actions:\n[button] Search Certified Pre-Owned -> CLICK\n[p] Four-Wheel Drive -> CLICK\n[p] Single Owner -> CLICK\n[combobox] Select Sort Order -> SELECT: Lowest mileage first\n[div] Request Info -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.318, 0.55, 0.682, 0.588] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7118", "image": {"bytes": "", "path": "./images/fc552b69-feb8-4951-bf67-725071bf8c8a_2c9106bd-de29-4ee2-a559-b876ebeec9de.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book fasted flexible direct flights from New York to Tokyo on June 1, between 6 to 12 pm departure, and from Tokyo to New Delhi on June 5.\nPrevious actions:\n[span] -> CLICK\n[div] Multi-city -> CLICK\n[span] Sat 13 May -> CLICK\n[checkbox] 1 June 2023 -> CLICK\n[span] Where from? -> TYPE: TOKYO"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.08, 0.232, 0.337, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7119", "image": {"bytes": "", "path": "./images/1d173ebb-ff7a-4e6b-9911-0f0349f9a174_a5b8ee8c-a196-4fb7-b6c4-b3c98765d90c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find parking in New York City by month on July 5th start date.\nPrevious actions:\n[tab] Monthly -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.083, 0.082, 0.785, 0.104] in the image\nAnd my action is Action: TYPE\nValue: New York"}]}, {"id": "mind2web_7120", "image": {"bytes": "", "path": "./images/d042ee7e-a2eb-448b-9942-b7aacc9115be_5e1ff95e-1727-43e6-9876-c4e2480529f4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the soonest upcoming event near zip code 90028.\nPrevious actions:\n[link] Events -> CLICK\n[button] Near you -> CLICK\n[textbox] Location search -> TYPE: 90028"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.129, 0.438, 0.157, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7121", "image": {"bytes": "", "path": "./images/694f3b5c-8ee4-45c1-bb7c-b5377c366096_9e3771b1-9b5b-4f07-b3d7-ccd343968002.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Select the mystery vehicle to pick up at Bradenton in Mahattan, Florida on april 20 at 3 pm and return at 6pm\nPrevious actions:\n[textbox] Enter your pick-up location or zip code -> TYPE: Manhattan\n[div] Manhattan -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 20 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.611, 0.261, 0.639, 0.272] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7122", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_bf68b1e8-6439-482f-9667-b1bd3845d2e8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK\n[label] 13Y(160) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.891, 0.209, 0.91, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7123", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_f31dd533-86bd-4d07-af19-fa8d0f61bb64.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[span] (HND) -> CLICK\n[combobox] Nationality -> CLICK\n[option] United States of America (USA) -> CLICK\n[combobox] Vaccination status Vaccination status -> CLICK\n[option] Fully vaccinated -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.904, 0.15, 0.944, 0.186] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7124", "image": {"bytes": "", "path": "./images/54e0d420-ec89-456a-b179-67c6b4b4babc_32c17d65-3a2c-4123-b579-31095e299b66.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Order one TAHE inflatable stand up paddle board using my default account/shipping info.\nPrevious actions:\n[button] Water -> HOVER\n[link] Paddle Boards -> CLICK\n[img] TAHE Beach SUP-Yak Tandem Inflatable Stand Up Padd... 
-> CLICK\n[button] Add to cart\u2014$799.95 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.269, 0.229, 0.494, 0.254] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7125", "image": {"bytes": "", "path": "./images/5e47dc3e-2a4a-4273-871d-dfdb33b091d5_2e501862-ee92-4af0-8eb0-7594690edef9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Filter women's sports bras that are purple with high support that are small size.\nPrevious actions:\n[div] Filter -> CLICK\n[RootWebArea] Women's Sports Bras | Under Armour -> CLICK\n[button] Show more -> CLICK\n[div] Color -> CLICK\n[link] Refine by color: Purple -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.723, 0.233, 0.763] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7126", "image": {"bytes": "", "path": "./images/50c13c64-4c9b-4f55-84f6-65ae443848a2_1335391d-8f77-4fc8-ab7e-983f67cc075a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find highest rated romantic summer vacations in Spain\nPrevious actions:\n[textbox] Where? -> CLICK\n[textbox] Where? -> TYPE: Spain\n[div] Spain -> CLICK\n[textbox] When? -> CLICK\n[li] Summer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.789, 0.009, 0.82, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7127", "image": {"bytes": "", "path": "./images/c7b0d1bc-2a0c-4060-92dd-cd4b8721b625_88c965f7-c3ca-411a-a58b-9c9db6803254.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for KOA near Lansing, Michigan for June 7, 2023 to June 12, 2023.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.227, 0.104, 0.434, 0.12] in the image\nAnd my action is Action: TYPE\nValue: Lansing"}]}, {"id": "mind2web_7128", "image": {"bytes": "", "path": "./images/1684f224-47d5-45ed-9a26-821b98b851cd_f5e72948-efaa-4d29-88d8-b29de4097021.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for the highest reviewed for a 4-adults stay in Hollywood, USA, on April 19 to 23 with an option for free cancellation with a price range between 150 to 200 dollars.\nPrevious actions:\n[img] Add -> CLICK\n[i] -> CLICK\n[span] SEARCH -> CLICK\n[textbox] Minimum price filter -> TYPE: 150\n[textbox] Maximum price filter -> TYPE: 200"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.077, 0.204, 0.089, 0.219] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7129", "image": {"bytes": "", "path": "./images/bf94a193-a30b-45b7-a93e-4733ea6a7ed4_8ee9e907-8abc-4563-b292-038c3e0e5edf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Alaska Cruises from Seattle in September 2023 with a duration of 8 days for 4 people on a ship Carnival Luminosa.\nPrevious actions:\n[button] SAIL TO -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.131, 0.655, 0.274, 0.688] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7130", "image": {"bytes": "", "path": "./images/2a440d5f-1cc8-43de-86f5-ea181b0d12f8_146148dd-b0a6-4ee8-a061-0ecbe585e606.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Who were the NBA season leaders from the 2020-21 regular season?\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.148, 0.028, 0.178, 0.046] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7131", "image": {"bytes": "", "path": "./images/0dd6fbb7-4236-47f7-a69c-30f0836b76c3_96e8f2c6-30ca-4af9-9cad-68c16acf5eff.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: See the price trends of used Tesla cars from August 1, 2022, to January 31, 2023, and compare the trend with the Cargurus index.\nPrevious actions:\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[generic] Previous Month -> CLICK\n[gridcell] 31 -> CLICK\n[button] Update Chart -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.398, 0.617, 0.408, 0.622] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7132", "image": {"bytes": "", "path": "./images/8dc49f67-4803-416a-8d05-d6e891e3efc0_75c668c5-0c35-4978-83b5-45de8d786e2c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the hotel with highest review score having free internet and free cancelation in Chennai for 20/03/23\nPrevious actions:\n[option] Chennai, Tamil Nadu, India -> CLICK\n[button] Monday March 20, 2023 -> CLICK\n[button] Search -> CLICK\n[svg] -> CLICK\n[tab] Review score -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.071, 0.745, 0.081, 0.757] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7133", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_0f984b4b-992d-4e92-b019-f3e933eb6465.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[textarea] -> TYPE: Happy Birthday Love\n[input] -> TYPE: Stuart Bloom\n[input] -> TYPE: Denise\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.639, 0.339, 0.677] in the image\nAnd my action is Action: TYPE\nValue: Debbi"}]}, {"id": "mind2web_7134", "image": {"bytes": "", "path": "./images/a92a83ca-25ff-4751-8754-c65ef858699d_0428fa36-92d7-4cc9-8e63-e5e07cfa06e9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the Lava Hot Springs East KOA photo gallery.\nPrevious actions:\n[textbox] WHERE DO YOU WANT TO GO? -> TYPE: Lave Hot Springs East KOA"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.236, 0.225, 0.306, 0.237] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7135", "image": {"bytes": "", "path": "./images/8e4dbb21-de6c-4b9a-8bd1-97fc6b1fe3c0_49946c93-dc95-4827-97d8-1d4712866ae9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a wedding venue in Venice Beach California in June for 100 guests.\nPrevious actions:\n[button] Find & Reserve -> CLICK\n[link] Book Meetings & Events -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.289, 0.266, 0.476, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7136", "image": {"bytes": "", "path": "./images/3d77584f-9074-46db-bd1e-086a506b54d3_4ddbdea8-b5ac-4696-820c-befd4dff83c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for the cheapest third-party certified front-wheel driving automatic white car for sale near Tampa, Florida with online paperwork and no accidents, confirm availability with my details.\nPrevious actions:\n[checkbox] No Accidents (4) -> CLICK\n[combobox] Sort By: -> SELECT: Price - Lowest\n[link] Confirm Availability for Used 2019 Buick Encore Pr... 
-> CLICK\n[textbox] First Name -> TYPE: James\n[textbox] Last Name -> TYPE: Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.357, 0.254, 0.493, 0.283] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7137", "image": {"bytes": "", "path": "./images/9dfba9af-d79e-4a75-83b4-0f85ab04c2e6_4b230fe6-7974-432d-89e3-e9d599c8b47e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a price quote for new Kia Carnival for 11101.\nPrevious actions:\n[link] Cars for Sale -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.493, 0.03, 0.673, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7138", "image": {"bytes": "", "path": "./images/0ff1648e-28bb-4014-9b8a-3c050c25e334_d1e667d6-a1c0-4ea4-8af5-2197e32eef24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse the women's hiking boots section and filter the results to show only those that are waterproof and have a rating of at least 4 stars and size 6.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.052, 0.128, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7139", "image": {"bytes": "", "path": "./images/122178b3-b5d4-41f0-be38-150ca0e2f5ed_f77f54bb-e260-4d62-a12d-3e8f3df35306.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a black blazer for men with L size and add to wishlist.\nPrevious actions:\n[svg] -> CLICK\n[searchbox] Search by keyword -> TYPE: blazer\n[span] blazer -> CLICK\n[heading] Gender > Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.153, 0.471, 0.178] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7140", "image": {"bytes": "", "path": "./images/e2142cde-5bec-46ee-8d17-9b6ee52a66e8_a8f5854f-36b5-45ff-94b0-1a79573adb4e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the inventory of the Tesla Model S within 200 miles from 10001\nPrevious actions:\n[link] Model S -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.291, 0.473, 0.491, 0.493] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7141", "image": {"bytes": "", "path": "./images/c5a0a291-ef9c-408d-9505-af7aeb6f5280_2aa336c5-81e8-426c-b8cc-18d8ed689c5b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Railcard options for a 20 year old couple traveling within London and the South East\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.838, 0.009, 0.863, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7142", "image": {"bytes": "", "path": "./images/ecb649da-0ca7-4707-8ebb-8707296e28b7_0f4264dd-1f6e-4313-b1e6-c3f392fc27c9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for flights available from Calgary (CYYC) to New York (ZNY).\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.336, 0.469, 0.355] in the image\nAnd my action is Action: TYPE\nValue: Calgary"}]}, {"id": "mind2web_7143", "image": {"bytes": "", "path": "./images/2ff0909b-a8bb-4370-8428-9d355f5c6a67_75c1a95a-3206-4beb-9527-099e88355322.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Open the most popular photos of the Adam A-500 model aircraft.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.047, 0.742, 0.068] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7144", "image": {"bytes": "", "path": "./images/555dd7af-72ce-4455-b4d1-dd9b20ae7c5e_d9a18977-172e-40e0-8bf9-513bbdf8ce7b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy two 100-pound digital gift cards with a birthday card and message of Happy Birthday Love from Stewart Bloom, send to Denise Bloom on email id denise.bloom@bbt.com and Debbie Wolowitz on email id debbie.wo@bbt.com.\nPrevious actions:\n[input] -> TYPE: Bloom\n[input] -> TYPE: denise.bloom@bbt.com\n[input] -> TYPE: Debbi\n[input] -> TYPE: Wolowitz\n[input] -> TYPE: debbi.wo@bbt.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.57, 0.809, 0.809, 0.844] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7145", "image": {"bytes": "", "path": "./images/7bdf34b4-2aa0-4387-9f6a-f7d7c5d17119_4b74b3ef-b1db-41c2-a207-02fec15b8daa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the alerts and conditions for the Alagnak Wild River.\nPrevious actions:\n[link] FIND A PARK -> CLICK\n[combobox] Park Name -> TYPE: Alagnak"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.216, 0.188, 0.784, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7146", "image": {"bytes": "", "path": "./images/4970cd99-ce6b-42e3-bc78-695c9d510f6c_662c8c2a-d32f-4265-85c2-2c854b72c7e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the trip planner for a trip between Cheyenne, WY and Helena, MT.\nPrevious actions:\n[link] FIND A CAMPGROUND \uf0d7 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.072, 0.122, 0.266, 0.137] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7147", "image": {"bytes": "", "path": "./images/5ad0f114-6cea-4f61-af3d-f9f4d4096d6c_6474e85f-0cdf-4124-be63-64495755c3dd.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add Uncharted: Legacy of Thieves Collection to my guide and find the Queen's bracelet information.\nPrevious actions:\n[link] Guides -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.323, 0.204, 0.677, 0.246] in the image\nAnd my action is Action: TYPE\nValue: Uncharted Legacy of Thieves Collection"}]}, {"id": "mind2web_7148", "image": {"bytes": "", "path": "./images/3b390b60-5c04-40ee-a5af-353def43ffbc_a5ebdd01-9df5-4350-a0cd-bdc3f2ca1173.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me japanese restaurants in Fresno with online delivery.\nPrevious actions:\n[link] CITY PAGES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.43, 0.131, 0.571, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7149", "image": {"bytes": "", "path": "./images/f1db33c6-c414-438b-bbfc-cdaa6cfc6b68_1aa6829d-0535-4794-be5c-4f934498abeb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check reviews for best electric SUV and find the 1 star rated review and mark it helpful.\nPrevious actions:\n[link] Car Reviews -> HOVER\n[link] Best Cars -> CLICK\n[img] Best SUVs image -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.681, 0.232, 0.829, 0.244] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7150", "image": {"bytes": "", "path": "./images/b8cfdd2c-c48d-4a58-84fb-a88514baf637_dda39a16-4008-4904-8895-51a26c5f8e31.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an available full-time job in USA finance, save all the accountant jobs.\nPrevious actions:\n[option] United States of America -> CLICK\n[button] Team -> CLICK\n[searchbox] Search -> CLICK\n[option] Finance -> CLICK\n[button] Time Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.387, 0.307, 0.408] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7151", "image": {"bytes": "", "path": "./images/cc27fdda-c68f-4d43-bbcb-6c7954bf00ec_96dab6e9-ebf8-4646-813a-ded293589c49.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Target Stores located near Cincinnati, Ohio and filter by those that have a Starbucks.\nPrevious actions:\n[link] Find Stores -> CLICK\n[button] enter zip or city, state -> CLICK\n[textbox] City -> CLICK\n[textbox] City -> TYPE: Cincinnati\n[combobox] State -> SELECT: OH"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.433, 0.656, 0.509, 0.697] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7152", "image": {"bytes": "", "path": "./images/1202e28a-361d-4704-979a-526c599b4859_d1797a5c-2b21-43af-8036-66c4b2fa4941.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a fully electric car pickup at Athens Airport (ATH) Sat, Apr 1, 2023 and return to the same palce at Mon, Apr 3, 2023.\nPrevious actions:\n[div] Car Hire -> CLICK\n[textbox] I need a car in -> TYPE: Athens\n[button] Athens Airport (ATH) Athens, Greece -> CLICK\n[div] Apr -> CLICK\n[generic] 1 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.341, 0.321, 0.362] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7153", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_7a897c64-a917-42b1-9c88-4587761e7767.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[gridcell] Fri, May 12, 2023 -> CLICK\n[div] Purpose of your event -> CLICK\n[li] Social -> CLICK\n[textbox] Guest rooms -> TYPE: 7\n[textbox] Event space -> TYPE: 50"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.663, 0.367, 0.959, 0.407] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7154", "image": {"bytes": "", "path": "./images/4498c83b-029f-4f1e-af81-722de20160e2_c776cdbf-4c36-4f53-9966-ee977e4f6309.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train with the shortest trip from New York, NY to Washington DC on June 1 in first class and add the ticket to the cart, preferably in the morning .\nPrevious actions:\n[button] FIND TRAINS -> CLICK\n[span] Sort/Filter -> CLICK\n[checkbox] filter trains by Time of Day:6a-12p -> CLICK\n[mat-pseudo-checkbox] -> CLICK\n[button] Close -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.705, 0.212, 0.831, 0.285] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7155", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_5baed715-106d-4b5c-b7b0-353a8b06f423.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 
16 -> CLICK\n[button] Continue with Value fare -> CLICK\n[button] Log in later -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.69, 0.166, 0.728] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7156", "image": {"bytes": "", "path": "./images/c7058499-3dc9-4175-9142-d22416d25a1f_f2307de6-5b1d-42b8-9bc9-b568deacc0e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search the best VIP seat to the Red Hot Chili Peppers 2023 Tour\nPrevious actions:\n[link] Music -> HOVER\n[a] Genres -> HOVER\n[link] Jazz -> CLICK\n[img] Image for Red Hot Chili Peppers 2023 Tour -> CLICK\n[button] VIP -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.025, 1.01, 0.08, 1.029] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7157", "image": {"bytes": "", "path": "./images/2705de3e-4461-4668-8573-8f10c1eb6641_3afea1db-55b2-42ec-bbce-86728f28a0ca.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a low-priced refurbished Microsoft Xbox one console in white color and available in stock.\nPrevious actions:\n[link] Filter -> CLICK\n[generic] Refine by Category: Consoles -> CLICK\n[checkbox] Microsoft Microsoft -> CLICK\n[button] Color -> CLICK\n[link] White -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.674, 0.375, 0.723] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7158", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_4952ce05-c06b-458e-b0ac-da5925c2ac39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[textbox] Where to? -> TYPE: skiing\n[button] When -> CLICK\n[button] Next -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.764, 0.307, 0.777, 0.316] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7159", "image": {"bytes": "", "path": "./images/181e8206-2d04-47ea-bcaf-f701b8c5525a_5f391dd3-bf59-4f55-8776-180a9fd6dc48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Castles in Europe for a week next month and price below $1000 per night.\nPrevious actions:\n[button] 1, Saturday, April 2023. Available. Select as chec... -> CLICK\n[button] 8, Saturday, April 2023. Available. Select as chec... -> CLICK\n[span] -> CLICK\n[span] Castles -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.526, 0.19, 0.739, 0.2] in the image\nAnd my action is Action: TYPE\nValue: 1000"}]}, {"id": "mind2web_7160", "image": {"bytes": "", "path": "./images/1b74fa2c-3236-45ac-9b4d-ff913112db52_87659e64-52c9-4193-842b-6676a7d6bf73.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for parking near Sofi stadium that allows to pay with phone.\nPrevious actions:\n[combobox] \uf002\u00a0 Enter an Address or Venue to find parking -> TYPE: sofi stadium\n[span] SoFi Stadium -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.128, 0.336, 0.276] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7161", "image": {"bytes": "", "path": "./images/551ab381-5bfe-4491-9602-0b0c584d1346_d71a5ed7-97e4-49b1-b3e5-d64c46ae7a24.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Give me the IMDB recommendations of what to watch.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.288, 0.628, 0.31] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7162", "image": {"bytes": "", "path": "./images/e4cf5835-4726-4a84-bc8f-5023e95ddec9_416731f8-343d-415b-8f98-b01beae69685.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for SkyMiles domestic Award Deals from Philadelphia to Atlanta.\nPrevious actions:\n[combobox] Origin -> CLICK\n[option] PHL Philadelphia, PA -> CLICK\n[combobox] Destination -> CLICK\n[option] ATL Atlanta, GA -> CLICK\n[button] Filters -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.365, 0.488, 0.41] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7163", "image": {"bytes": "", "path": "./images/14a4e19d-d82d-45dd-b4f0-6b60ed87d085_e63aff02-4d6c-4289-b897-91262275d712.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find nearest Sprouts Farmers Market and add two fruits and one sauce from the deals.\nPrevious actions:\n[img] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.009, 0.172, 0.193, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7164", "image": {"bytes": "", "path": "./images/759d1c94-3314-481f-8763-5918b00567c4_39ca15cc-f268-42e1-ba92-de0f199ac70b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for Mexican restaurants in Concord, CA and start an order at La Pinata Mexican Restaurant.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.023, 0.39, 0.037] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7165", "image": {"bytes": "", "path": "./images/fd2e9403-3201-4bb7-a19d-f475960cf5be_87e392a0-87a0-45b7-9c89-069cc86317ce.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse Thai restaurants that offer delivery and have vegan options available.\nPrevious actions:\n[span] Thai Restaurants -> CLICK\n[button] Offers Delivery -> CLICK\n[span] 1 -> CLICK\n[button] See all -> CLICK\n[checkbox] Vegan -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.637, 0.124, 0.688, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7166", "image": {"bytes": "", "path": "./images/f9723022-04b9-4778-ad33-cf54eac3d393_bb12a018-b966-477f-8fec-249635e955eb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find events starting from April 1, 2023 within 100 miles of New York and see who are going in the top listed event and follow all of them who are going.\nPrevious actions:\n[textbox] Location search -> TYPE: new york\n[span] New York -> CLICK\n[combobox] SEARCH RADIUS -> SELECT: 100 km\n[button] APPLY -> CLICK\n[button] From today -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.099, 0.264, 0.287, 0.294] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7167", "image": {"bytes": "", "path": "./images/bb3e2b61-c14c-4a15-8de4-7c12256d52c7_1c128c7d-67c5-454c-9aaa-82cd82e5e69d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for concerts taking place in Los Angeles, CA in the next month.\nPrevious actions:\n[option] Los Angeles, CA -> CLICK\n[span] Filter by -> CLICK\n[div] 16 -> CLICK\n[div] 16 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.047, 0.31, 0.264, 0.405] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7168", "image": {"bytes": "", "path": "./images/a513befc-46c3-4bb6-87c0-8f219107c756_f7834a7a-41e6-48ab-9ed4-922a1940da9e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a NBA basketball game for the PS5 to the shoping cart\nPrevious actions:\n[searchbox] Search games, consoles & more -> TYPE: nba2k23\n[button] Search -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.218, 0.179, 0.322] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7169", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_53a1b63c-5d8d-48eb-a2b4-7246f1da4b0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK\n[input] -> TYPE: James Smith"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.25, 0.759, 0.279] in the image\nAnd my action is Action: TYPE\nValue: buckeye.foobar@gmail.com"}]}, {"id": "mind2web_7170", "image": {"bytes": "", "path": "./images/c5070129-37b1-4f87-930f-c45049a70e97_50e901a3-dcef-4026-a6b6-282ba62561c2.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Use the FareFinder to find flights for $150 or less from Dublin to anywhere.\nPrevious actions:\n[div] Dublin -> CLICK\n[textbox] To: -> CLICK\n[div] Anywhere -> CLICK\n[svg] -> CLICK\n[div] $ 150 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.718, 0.403, 0.862, 0.448] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7171", "image": {"bytes": "", "path": "./images/00deddc4-8991-47b5-92f1-f9eb88011b16_318ea7e0-6ab8-4357-9d9a-e407c2f4dfad.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all outdoor events this month in NYC\nPrevious actions:\n[DisclosureTriangle] All locations -> CLICK\n[li] NYC -> CLICK\n[DisclosureTriangle] All Categories -> CLICK\n[li] Outdoors -> CLICK\n[DisclosureTriangle] All Dates -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.026, 0.329, 0.165, 0.352] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7172", "image": {"bytes": "", "path": "./images/9ab66035-9569-4d5a-b1ca-09128cdd7456_d0539316-5a09-4304-a9c2-2beef62f1c93.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest Hawaii package for two adults from June 18 to 21, and the hotel must be near a beach, have a beachfront, hot tub, and pool, and provides towels.\nPrevious actions:\n[button] Property style -> CLICK\n[checkbox] Beach -> CLICK\n[div] Recommended -> CLICK\n[tab] Price (low to high) -> CLICK\n[div] View Deal -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.672, 0.56, 0.959, 0.603] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7173", "image": {"bytes": "", "path": "./images/401c4e6f-6b0b-47b4-8157-92d7ca468bbc_6c7a7082-2897-41c7-9688-4b0f3d778cdb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: rent a car in Brooklyn - Central, NY on from April 9 to April 15.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.221, 0.116, 0.294, 0.133] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7174", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_ba3ec399-f548-4454-b3f0-eaf53fce1d3b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.043, 0.966, 0.072] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7175", "image": {"bytes": "", "path": "./images/1ee63f83-8b6a-4883-813f-63f589e6e52b_cc4e6036-7475-48cc-99dd-d130b01c3dea.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book two rooms in the cheapest 3-star hotel which provides free breakfast, and free airport transportation for three adults, and one child from April 22 to 25 in texas city, the hotel should provide amenities for kids.\nPrevious actions:\n[button] Search without signing in -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.866, 0.222, 0.942, 0.232] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7176", "image": {"bytes": "", "path": "./images/b49f0d3e-7047-41df-9b4f-c2c15b1d3f70_66e14c21-122f-4ff8-af51-9510d38fef5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find The Ritz-Carlton hotels in Washington DC that can hold a student social event from May 9 to May 12 and will require 7 guest rooms and an event space for 50\nPrevious actions:\n[button] Meetings & Events -> CLICK\n[link] Group Travel -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.279, 0.142, 0.487, 0.151] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7177", "image": {"bytes": "", "path": "./images/0991035b-6acb-4dca-aaef-5384a0739781_cdb3023c-a1e1-4791-aeb1-ecfdcd3e3c26.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find careers at the company on the Product Management team\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.826, 0.035, 0.879, 0.05] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7178", "image": {"bytes": "", "path": "./images/0c7a69f9-989d-4899-a3fe-94c952859007_84a83797-c89b-4fe8-a1d3-e2198a825f0f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Chicago to London on 20 April and return on 23 April.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.126, 0.301, 0.365, 0.317] in the image\nAnd my action is Action: TYPE\nValue: Chicago"}]}, {"id": "mind2web_7179", "image": {"bytes": "", "path": "./images/5ea9ec16-845e-4a99-8848-fd96e8bad254_588f22df-6300-45c1-839a-bdaf09f6b27b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find flights from Los Angeles to Miami for two people departing on July 1st and returning on July 7th.\nPrevious actions:\n[textbox] Going to -> TYPE: Miami, FL\n[button] Miami (MIA - Miami Intl.) Florida, United States -> CLICK\n[button] 1 traveler -> CLICK\n[img] Increase adults -> CLICK\n[button] Departing April 11, 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.963, 0.17, 0.981, 0.182] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7180", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_a60ce1f0-5d1f-4a3b-a4fb-251b1c51f61b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[link] Rent a Moving Truck Budget Truck -> CLICK\n[textbox] US City,State or Zip Code -> TYPE: 07718"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.082, 0.172, 0.352, 0.196] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7181", "image": {"bytes": "", "path": "./images/fe6b5531-ce29-4ba1-8e98-43b5fecf57fa_2a015d4e-2e74-4a02-ae2e-1e529eabf668.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest flight from London to New York on 9th May.\nPrevious actions:\n[combobox] Flying to -> TYPE: New York\n[option] Destination New York (NY) -> CLICK\n[span] 9 -> CLICK\n[button] SEARCH FLIGHTS -> CLICK\n[span] Price per person -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.01, 0.808, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7182", "image": {"bytes": "", "path": "./images/a83c14aa-1a0b-4aaf-a801-ac828d2e81ca_ed47769d-ee72-44f7-bdc9-f58989e4f21d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a 4 star rated Hotel in New York Timesquare for 2 adults.\nPrevious actions:\n[textbox] Where are you going? 
-> TYPE: Timesqure New York\n[button] Times Square, New York, NY, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.108, 0.83, 0.125] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7183", "image": {"bytes": "", "path": "./images/813e47ec-6304-42c5-82d4-1fc461247965_deb51466-b520-47d8-bba4-841ca652c58f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest set of stainless steel spoons, which are available in stock in San Diego, California.\nPrevious actions:\n[path] -> CLICK\n[span] -> CLICK\n[path] -> CLICK\n[span] -> CLICK\n[button] Type -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.221, 0.966, 0.264] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7184", "image": {"bytes": "", "path": "./images/f4555944-010b-48d2-a4cb-b7103c686bac_e2fccb19-f1cf-467f-9917-1202c07e965e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the movie which won the jury prize in Cannes 2022, add the director of the movie to the new list named director and add \"to watch\" in the description of that list.\nPrevious actions:\n[link] Eo -> CLICK\n[button] \u200bJerzy Skolimowski -> CLICK\n[button] Add to list -> CLICK\n[menuitem] Create new list -> CLICK\n[textarea] -> TYPE: Directors"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.125, 0.415, 0.609, 0.451] in the image\nAnd my action is Action: TYPE\nValue: To Watch"}]}, {"id": "mind2web_7185", "image": {"bytes": "", "path": "./images/66a5b212-cf94-4917-8015-58970dc54187_6afdf927-8e32-47d0-a8a1-3197de949c01.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the amtrak national route map\nPrevious actions:\n[button] DESTINATIONS -> CLICK\n[link] \uf18a SEE ALL ROUTES -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.345, 0.235, 0.655, 0.268] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7186", "image": {"bytes": "", "path": "./images/bcc2b6df-c012-4513-9fc1-ab149e93001f_7e768de4-2cc5-4d90-8eb6-7b30dc596f7e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Rent a 12-inch available moving truck with a complete protection package for approx 250 miles, pick it up at 4:00 pm, and drop it off at the same location on June 20 at 4:30 pm from zip 07718.\nPrevious actions:\n[button] Find Your Truck -> CLICK\n[button] Select Truck -> CLICK\n[input] -> TYPE: 250\n[button] Continue to Location -> CLICK\n[select] Select a Pick-up Time -> SELECT: 04:00 PM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.739, 0.236, 0.915, 0.254] in the image\nAnd my action is Action: SELECT\nValue: 04 30 PM"}]}, {"id": "mind2web_7187", "image": {"bytes": "", "path": "./images/ff173880-e7f5-4b4e-b941-79e9c3504add_d0f8939d-9053-4418-a49a-b8bbb6bae5d9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find wall mirrors for under $20.\nPrevious actions:\n[link] Products -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.117, 0.267, 0.32, 0.284] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7188", "image": {"bytes": "", "path": "./images/2ef470ab-b06a-4479-883f-78b4e3b94a04_1c3d2eff-3a63-4757-bdbf-48e0f4ba8d4d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find five star rated cultural tours in New York on May 7 with a max price of $75\nPrevious actions:\n[button] For Guests\ue91e -> CLICK\n[link] Activities \ue922 -> CLICK\n[textbox] Location-Search -> TYPE: New York\n[link] New York City New York, USA -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.364, 0.127, 0.552, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7189", "image": {"bytes": "", "path": "./images/b7082615-e6e1-4981-b51b-9259671d1adf_765d1395-6d7e-496e-96ad-ce9fa6367197.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Change your store to the one nearest to 07055\nPrevious actions:\n[span] Your store for 43219 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.67, 0.17, 0.847, 0.189] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7190", "image": {"bytes": "", "path": "./images/46a3683f-fbe0-40d0-8729-6c7964d994e6_11731204-34aa-4719-a789-eae83d26586a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a men's UA outlet T-shirt of XL size and add to cart.\nPrevious actions:\n[link] Mens -> CLICK\n[div] Product Category -> CLICK\n[link] Clothing -> CLICK\n[div] Product Type -> CLICK\n[link] Short Sleeves -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.044, 0.74, 0.233, 0.788] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7191", "image": {"bytes": "", "path": "./images/4a0bd619-4aa9-48d8-8322-89a86aef93db_7c149935-f2e7-47f5-beca-303dc388238e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a newly listed used antique Oak chair in the French style made before year 1800.\nPrevious actions:\n[button] Style -> CLICK\n[link] French -> CLICK\n[button] Material -> CLICK\n[link] Oak -> CLICK\n[button] Time Period Manufactured -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.612, 0.254, 0.752, 0.278] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7192", "image": {"bytes": "", "path": "./images/9f57055d-c269-47d7-99be-3525d725439e_6dc6377a-e668-4b3b-8e93-5f5f1899b8f9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the NBA team with the best odds to win the NBA title.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.311, 0.056, 0.335, 0.065] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7193", "image": {"bytes": "", "path": "./images/e48f848d-62b8-441e-aafb-c76aeb2c4f84_5df6d848-d5b7-4202-ac80-1959faf35581.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Look for an internship role for a university graduated in the in Germany, and apply to the job posted in last 7 days.\nPrevious actions:\n[button] Our Company\ue91e -> CLICK\n[link] Careers \ue922 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.007, 0.317, 0.047, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7194", "image": {"bytes": "", "path": "./images/67648411-9d11-4254-8973-7d8dfbca58e8_bdc2d235-fba7-4911-a120-56b4a3e32410.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search four door SUVs from Avis while traveling from New Orleans, L, to New York that isn't more than $1000 in total price from Mar 25th to Apr 1st, pickup at 2PM.\nPrevious actions:\n[generic] Saturday March 25th -> CLICK\n[button] Saturday March 25, 2023 -> CLICK\n[generic] Saturday April 1st -> CLICK\n[button] Saturday April 1, 2023 -> CLICK\n[button] Noon -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.665, 0.285, 0.745, 0.306] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7195", "image": {"bytes": "", "path": "./images/4ff347e6-e911-4af5-8151-7805a9e91b28_d0e2ec63-357b-4237-9476-b54c6feba4e7.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show current alerts for red line subway.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.582, 0.093, 0.722, 0.172] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7196", "image": {"bytes": "", "path": "./images/9e3786bf-56a4-4b4a-a94e-a4c1620cd0b2_7a9111bf-ea91-4f63-b47b-e1117bc84494.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find videos from the Oscar 2023.\nPrevious actions:\n[button] Open Navigation Drawer -> CLICK\n[span] Oscars -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.294, 0.197, 0.357, 0.221] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7197", "image": {"bytes": "", "path": "./images/bef473f1-82a1-4359-a2c0-59b6dc2f6abb_7b768457-fd7e-478f-b7a5-e5b3b31acd34.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a park in Alaska that is also a National Heritage Area.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.211, 0.298, 0.789, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7198", "image": {"bytes": "", "path": "./images/3cb44998-5a0c-44c5-84b7-ffb729a356d8_e1c0c2da-37ad-41aa-a735-8768c02f6928.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the schedule between New York and Washington DC sorted by shortest trip.\nPrevious actions:\n[textbox] From -> TYPE: New York\n[option] New York, NY - Moynihan Train Hall at Penn Sta. (N... -> CLICK\n[textbox] To -> TYPE: Washington"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.327, 0.247, 0.559, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7199", "image": {"bytes": "", "path": "./images/1b17b79c-589f-45eb-b55d-c977d1b9708e_03315d1e-dfa4-4cc0-b5ee-7a7b8f4cf799.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest motel for 2 adults in Seattle from 11 May to 18 May\nPrevious actions:\n[combobox] Enter a destination or property -> TYPE: seattle"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.147, 0.491, 0.845, 0.514] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7200", "image": {"bytes": "", "path": "./images/5f2c3149-1820-4632-bfdb-3cf4615de2cc_325b7d4b-c635-4187-851d-8219f9a98b4c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse dog food for puppies that is rated at least 4 stars and sort the results by price from low to high.\nPrevious actions:\n[textbox] Search Amazon -> TYPE: dog food\n[button] dog food -> CLICK\n[i] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.016, 0.51, 0.219, 0.528] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7201", "image": {"bytes": "", "path": "./images/446e3135-8a53-455f-9471-9f6660f6a94d_a811817b-439d-4624-995e-f2151b37a537.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a best business class flight ticket from Colombo to New York with a two-day Qatar stopover for 2 adults and 1 child on June 24.\nPrevious actions:\n[textbox] From Autocomplete selection. Enter your place. -> TYPE: colombo"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.242, 0.5, 0.28] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7202", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_073be97e-382f-4914-9a6e-ba12b35d6460.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[tab] Character -> CLICK\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.27, 0.064, 0.279] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7203", "image": {"bytes": "", "path": "./images/f4dcbd09-45f3-47b5-a49d-36658d5b50ca_2e19a5e8-4201-46f0-a062-f812e7f06f8a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find tickets for Pizza making class in New York next month.\nPrevious actions:\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.086, 0.705, 0.096] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7204", "image": {"bytes": "", "path": "./images/f1f4e4f7-3fb0-4aeb-8123-4882a1f39cb2_b20324e8-7daa-49b4-b79c-fcc6ef95992d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the new arrival of kids graphic t-shirt for 13 years old boy\nPrevious actions:\n[link] KIDS -> CLICK\n[tab] Tops -> CLICK\n[link] UT: Graphic Tees -> CLICK\n[gridcell] Size -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.063, 0.484, 0.139, 0.504] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7205", "image": {"bytes": "", "path": "./images/c95b0276-a588-4aeb-906f-a3f6f1c205ae_442b42be-0716-4617-8548-d72fbceb218d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me the list of BGG shirts for sale\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.32, 0.008, 0.393, 0.03] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7206", "image": {"bytes": "", "path": "./images/03e45ce0-4375-44aa-b57f-cf439ccbe363_540f58be-846a-4639-988b-214d708f6238.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest news article and send an email about it.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.692, 0.092, 0.963, 0.1] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7207", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_8ff449e2-99a5-48b0-94be-3804c92710c4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[a] SFO - San Francisco International, CA -> CLICK\n[textbox] To -> TYPE: new york\n[a] NYC - New York, NY -> CLICK\n[combobox] Number of passengers -> SELECT: 2\n[textbox] Depart (date format mm/dd/yyyy) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.344, 0.401, 0.358, 0.413] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7208", "image": {"bytes": "", "path": "./images/837989e0-7476-496a-be02-e4c69f1f989b_27977c97-543d-4538-bfb8-ac7679262132.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a cheapest one way flight from Mumbai to Dubai on 24 May.\nPrevious actions:\n[span] Dubai, United Arab Emirates -> CLICK\n[svg] -> CLICK\n[tab] One-way -> CLICK\n[generic] Thursday May 18th -> CLICK\n[div] 24 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.931, 0.112, 0.975, 0.14] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7209", "image": {"bytes": "", "path": "./images/0dd0b532-7bac-4b3e-b5a4-2ca6b0897af6_20d5f662-5659-41f2-873e-3f1a4a681fe1.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse comedy TV shows streaming on HBO Max that was released in 2010.\nPrevious actions:\n[link] What to Watch on HBO Max -> CLICK\n[button] Filter -> CLICK\n[button] Comedy -> CLICK\n[button] TV Shows -> CLICK\n[span] 2010 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.361, 0.438, 0.639, 0.463] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7210", "image": {"bytes": "", "path": "./images/b28e6a37-6e9c-46bb-8012-18ca742a521f_1650528f-5ac6-4a51-91a8-e217fa90b7a0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Explore Durban in Africa and book a flight from Doha for 2 adults and 1 infant, on May 28, book the first flight in economy comfort.\nPrevious actions:\n[button] Explore (current) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.254, 0.114, 0.284, 0.129] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7211", "image": {"bytes": "", "path": "./images/1860601a-faef-4dba-8cbb-807ea8434dca_a7c0ac26-f51d-45fc-969f-73a22770dfc0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check for travel requirements when travelling from New York to Tokyo as US citizen.\nPrevious actions:\n[tab] Travel Info -> CLICK\n[link] Travel Planning Center -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.055, 0.102, 0.322, 0.131] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7212", "image": {"bytes": "", "path": "./images/8ab1c90f-52e0-4c07-9d41-d44c75c25c5e_cb7997fb-4091-45ac-b23e-3c4f1e114867.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for an automatic grey sports car with the lowest price, gas fuel and free shipping manufactured between 2018 to 2022, compare the top two results and compare photos.\nPrevious actions:\n[menuitem] 2022 -> CLICK\n[button] Back to all categories -> CLICK\n[button] Exterior Color -> CLICK\n[listitem] Gray (5) Gray (5) -> CLICK\n[button] Back to all categories -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.0, 0.296, 0.253, 0.324] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7213", "image": {"bytes": "", "path": "./images/08a998f9-82f7-48c8-b6ba-72a58d39b457_85e614b4-41b1-43b9-8519-71f86c26641d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find all Uniqlo's located in Chicago, IL.\nPrevious actions:\n[link] Store Locator -> CLICK\n[searchbox] SEARCH BY KEYWORD -> CLICK\n[searchbox] SEARCH BY KEYWORD -> TYPE: Chicago"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.821, 0.178, 0.959, 0.213] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7214", "image": {"bytes": "", "path": "./images/7ec95f61-b8f4-4431-84d2-55d9f7cdd594_fba5ff31-33b7-42ac-81a2-29fd9779dc8c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: check flight status for a flight from united airlines leaving Belo Horizonte to Buenos Aires on April 9\nPrevious actions:\n[option] Apr 9, 2023 -> CLICK\n[combobox] Airline. 
Enter to change collapsed list. Selected ... -> CLICK\n[option] United Airlines (UA) -> CLICK\n[combobox] Enter your departing city, airport name, or airpor... -> TYPE: belo horizonte\n[button] Belo Horizonte, MG, BR (CNF - Tancredo Neves) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.362, 0.295, 0.637, 0.322] in the image\nAnd my action is Action: TYPE\nValue: buenos aires"}]}, {"id": "mind2web_7215", "image": {"bytes": "", "path": "./images/3d76a0fc-7ba9-4a74-ac52-db0cea359553_acaf6392-4605-4e32-a646-d1b3d7675895.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced round trip flight with hotel on May 2 from Kathmandu, Nepal KTM to Shanghai, China PVG and return on May 5. Book a double room and check out with the default flights.\nPrevious actions:\n[checkbox] 5 May 2023 -> CLICK\n[button] Search -> CLICK\n[button] Recommended -> CLICK\n[div] Lowest Price -> CLICK\n[svg] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.842, 0.316, 0.968, 0.338] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7216", "image": {"bytes": "", "path": "./images/08a0c5cc-41a9-4eb8-b3ea-91d9e26f8bd3_455d1b00-132a-42d9-bb64-a7c94ae46392.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get directions for the Koa resort campground in Arizona\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA RESORT CAMPGROUNDS \uf0da -> CLICK\n[button] Arizona -> CLICK\n[link] Tucson / Lazydays KOA Resort\ue250 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.561, 0.151, 0.659, 0.159] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7217", "image": {"bytes": "", "path": "./images/8f6374b0-36f1-478e-a282-a61849c8174f_fb4c8a9d-fe56-4fa9-9c3f-9c8044e3e2c6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add eleven produce items from Aldi, including 1 pack of blueberry, 6 bananas, 1 grape tomato, 1 roma tomato, 1 cilantro, to my cart and have it delivered tomorrow between 10am and 1pm to the default address.\nPrevious actions:\n[button] View Cart. Items in cart: 5 -> CLICK\n[button] View Cart. Items in cart: 5 -> CLICK\n[div] Go to checkout -> CLICK\n[div] -> CLICK\n[button] Confirm address -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.481, 0.619, 0.535] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7218", "image": {"bytes": "", "path": "./images/ca049641-9721-4593-95c5-a47e22365b5b_37e9f402-9fb7-4e3e-a1fe-8756bffbffbb.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new electric car with highest ev mile range per charge, see the details of the top result.\nPrevious actions:\n[link] Cars for Sale -> HOVER\n[link] Cars for Sale -> CLICK\n[checkbox] New (9,776) -> CLICK\n[heading] Fuel Type -> CLICK\n[checkbox] Electric (175) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.113, 0.589, 0.296, 0.635] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7219", "image": {"bytes": "", "path": "./images/a8474730-82eb-4f12-b947-db64ac08660b_4c7017eb-b5a7-45a4-9644-d3b39dfe5c2e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the most recently posted job opening in customer service in Ireland with a remote working option.\nPrevious actions:\n[div] Discover jobs around the world -> CLICK\n[ppc-content] Ireland -> CLICK\n[button] Flexible Workstyles -> CLICK\n[span] -> CLICK\n[button] Category -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.042, 0.139, 0.052, 0.146] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7220", "image": {"bytes": "", "path": "./images/a67318a4-5049-4ac5-8c6b-c14fc527483f_6911a96a-5cf1-45ea-a4ac-7b020fa68506.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Browse used BWM cars made in 2010 and compare the four cheapest listings\nPrevious actions:\n[select] 1992 -> SELECT: 2010\n[button] Close dialog -> CLICK\n[select] 2024 -> SELECT: 2010\n[combobox] Sort By -> SELECT: Price: Low to High\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.42, 0.614, 0.489, 0.63] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7221", "image": {"bytes": "", "path": "./images/4c4ab02e-a617-490d-96c6-333d7fa82fe9_9d13499c-38c1-4c07-9165-7af7d7dc7bee.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the status of my upcoming trip with booking number 123456789 and last name Smith.\nPrevious actions:\n[tab] \ue90cMy Trips -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.06, 0.285, 0.353, 0.317] in the image\nAnd my action is Action: TYPE\nValue: 123456789"}]}, {"id": "mind2web_7222", "image": {"bytes": "", "path": "./images/cf8da12a-e1fd-48b5-8c53-b88f301ca1e0_cde960df-47f6-41cd-bd34-9082cede2dfa.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for video game consoles and filter the results to show only those that have Wi-Fi Capability.\nPrevious actions:\n[link] Electronics -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.013, 0.415, 0.173, 0.429] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7223", "image": {"bytes": "", "path": "./images/9ebd069a-7703-47b5-9c75-53958637e7c0_ba2bbee3-bfcc-4bab-91ef-2fd4893e6c39.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Watch the halftime show from the most recent Super Bowl\nPrevious actions:\n[use] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.73, 0.038, 0.787, 0.044] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7224", "image": {"bytes": "", "path": "./images/c175fe34-143b-4a87-a462-e8e9d69d0b95_3cd2999e-b440-48bd-9a23-54e47a560466.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check flight status from London to New York City for next day.\nPrevious actions:\n[link] Flight status -> CLICK\n[textbox] From , required. -> TYPE: London\n[a] LON - London, United Kingdom -> CLICK\n[textbox] To , required. -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.348, 0.138, 0.641, 0.154] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7225", "image": {"bytes": "", "path": "./images/712d9d7f-d7ee-4554-affa-133449231ae4_f7af6222-9fbe-4bed-9d34-344c135ddca0.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest medical audiobook on complementary medicine for under 20 dollars and add the top book to the cart.\nPrevious actions:\n[link] Shop by category\u2228 -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.177, 0.182, 0.331, 0.191] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7226", "image": {"bytes": "", "path": "./images/1337eca1-4ca6-4102-964e-e7f6ecebcd0b_347640d1-0b66-485b-9360-cf59dc1ce10c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find things to do in Singapore on April 2 that are family friendly and offer free cancellation\nPrevious actions:\n[tab] Things to do -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.069, 0.143, 0.568, 0.183] in the image\nAnd my action is Action: TYPE\nValue: singapore"}]}, {"id": "mind2web_7227", "image": {"bytes": "", "path": "./images/767faaa6-220e-4e6c-ac1d-39b1501c69cf_869e8d1b-c77b-48a8-9a52-9b34eace9019.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Hire a large car with air conditioning for a small group to pick up from Berlin on May 15, 11:30 am, and drop off at the exact location on May 16, 11 am, book with full protection and payment with a credit card.\nPrevious actions:\n[textbox] I need a car in -> TYPE: berlin\n[button] Berlin Berlin Federal State, Germany (City) -> CLICK\n[div] -> CLICK\n[generic] 15 -> CLICK\n[generic] 16 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.266, 0.827, 0.372, 0.858] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7228", "image": {"bytes": "", "path": "./images/277bdab6-e924-45dd-ba26-d659d864d671_a69f9ab4-9419-40c0-a22b-d1bad1fd7c55.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add 2 different flavor dog treats and one dog food with lowest price to cart from Safeway.\nPrevious actions:\n[textbox] Search -> TYPE: dog treats\n[span] dog treats -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.05, 0.156, 0.956, 0.193] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7229", "image": {"bytes": "", "path": "./images/06497906-8b76-4367-a1b3-ebc8238bd470_f00c7706-22fe-42e0-bc77-2312bea8c3d4.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Choose a lowest priced hybrid SUV car to pick up from Bangkok downtown on May 24 at 9:30 am and drop off on May 28, at 9 am.\nPrevious actions:\n[checkbox] 28 May 2023 -> CLICK\n[combobox] Pick up time -> SELECT: 9:30 AM\n[combobox] Drop off time -> SELECT: 9:00 AM\n[div] Search -> CLICK\n[div] SUVs -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.084, 0.333, 0.331, 0.343] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7230", "image": {"bytes": "", "path": "./images/240952bd-853b-4653-a358-49c2784cf568_1b7055a0-0d97-4373-acf5-fd2eb15e484b.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find a train scheduled between Chicago to Los Angeles for the next day, and if a southwest chief train is available, then print the route details.\nPrevious actions:\n[link] SCHEDULES -> CLICK\n[input] -> TYPE: chicago\n[option] Chicago, IL - Union Station (CHI) -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.374, 0.071, 0.553, 0.082] in the image\nAnd my action is Action: TYPE\nValue: los angeles"}]}, {"id": "mind2web_7231", "image": {"bytes": "", "path": "./images/da679cfe-9180-49f7-b8a2-a2b1bd6d8803_14aad43c-c6e4-4205-beaa-94ff9abb394c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Show me all the artists with smith in their name\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.531, 0.059, 0.838, 0.09] in the image\nAnd my action is Action: TYPE\nValue: smith"}]}, {"id": "mind2web_7232", "image": {"bytes": "", "path": "./images/2177b546-5718-478b-8535-bc37975b0d0c_aea31efd-c391-4099-a13e-3a9417cca68f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book 2 hot seat VIP tickets through email id adelefan@hotmail.com for Adele's concert in Las Vegas on the last weekend of June.\nPrevious actions:\n[link] TICKETS -> CLICK\n[div] Quantity -> CLICK\n[label] 2 -> CLICK\n[button] $3,535/ea -> CLICK\n[textbox] *Email Address -> TYPE: adelefan@hotmail.com"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.477, 0.448, 0.64, 0.479] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7233", "image": {"bytes": "", "path": "./images/64779409-c396-4779-a574-b219c3b22282_ee6dfe65-aa46-4181-97d8-3c1944f1ba7c.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the longest skiing activity in August 10\nPrevious actions:\n[path] -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[button] Next -> CLICK\n[gridcell] Thu Aug 10 2023 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.656, 0.458, 0.78, 0.48] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7234", "image": {"bytes": "", "path": "./images/627a99f1-b2e4-4ce4-93f5-89e691f8c879_89295463-6730-47b5-9111-ef25b548fe7a.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find Kevin Durant's bio\nPrevious actions:\n[button] Open Search -> CLICK\n[textbox] Search -> TYPE: Kevin Durant"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.678, 0.07, 0.931, 0.092] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7235", "image": {"bytes": "", "path": "./images/298c854d-2987-498b-b43e-6a6452fb11c7_349619be-15c9-4731-a1c9-c020a40df044.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find unique experiences in London of maximum one hour duration which are rated four stars and above\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.271, 0.217, 0.463, 0.224] in the image\nAnd my action is Action: TYPE\nValue: London"}]}, {"id": "mind2web_7236", "image": {"bytes": "", "path": "./images/6ca55141-9ba6-4ed7-8c07-77df6c24029d_148ed09c-e612-460c-8704-bafa61872edf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the lowest-priced gray-colored round-shaped mirror in stock in Atlanta, Georgia, with a maximum price of 20 dollars.\nPrevious actions:\n[button] Sort Best match -> CLICK\n[radio] Price: low to high -> CLICK\n[button] Sort -> CLICK\n[button] Shape -> CLICK\n[span] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.95, 0.146, 0.969, 0.156] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7237", "image": {"bytes": "", "path": "./images/d4d4a01e-47d0-4e54-8bd5-3d85acebb6dd_b47f5256-ae9f-4c40-8a64-189b47fe6849.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the latest action tv shows which are rated fresh by both the audience and the rotten tomatoes\nPrevious actions:\n[link] TV SHOWS -> CLICK\n[span] Sort: Most popular -> CLICK\n[span] Newest -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.204, 0.409, 0.274, 0.438] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7238", "image": {"bytes": "", "path": "./images/94bd2a0f-fe08-4420-a0e8-d48248630b6e_9df6a79e-671a-40f0-bc71-b7394d96f511.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the Leaderboard for the top 10 fantasy Basketball players for the Rotisserie challenge.\nPrevious actions:\n[link] Fantasy . -> CLICK\n[link] Fantasy Basketball -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.169, 0.148, 0.221, 0.165] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7239", "image": {"bytes": "", "path": "./images/cc643c1f-e73e-4e9f-abe9-5ee2d3b0a0dd_08603353-f42f-498c-8b7e-74aceb336815.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Check the lowest price for a white 2017 certified pre owned Lexus Rx350 near 90012.\nPrevious actions:\n[LabelText] Certified Pre-Owned -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.249, 0.144, 0.358, 0.167] in the image\nAnd my action is Action: SELECT\nValue: Lexus"}]}, {"id": "mind2web_7240", "image": {"bytes": "", "path": "./images/87b23bf1-3c23-45a6-82bf-5a535c7cdc2f_eac5aaaf-f188-42f8-a20d-90b6695ad4b9.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book the cheapest long-term car rental with a fuel plan, GPS, and satellite radio from June 29 to July 28, pick up at 9 am at Orlando airport and dropping off at the same location.\nPrevious actions:\n[span] Orlando Intl Airport -> CLICK\n[textbox] mm/dd/yyyy -> CLICK\n[link] 29 -> CLICK\n[link] 28 -> CLICK\n[combobox] Pick Up Time -> SELECT: 9:00 AM"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.701, 0.154, 0.902, 0.187] in the image\nAnd my action is Action: SELECT\nValue: 9 00 AM"}]}, {"id": "mind2web_7241", "image": {"bytes": "", "path": "./images/1f128c19-3093-4c49-bfc7-13589aa8e88a_6651b25d-7ba0-4963-a7d0-3211b2eb79b6.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get a Hasbro Hulk action figure manufactured in 1990 with the lowest price + shipping.\nPrevious actions:\n[checkbox] Hulk -> CLICK\n[tab] Year Manufactured -> CLICK\n[checkbox] 1990 -> CLICK\n[button] Apply -> CLICK\n[input] -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.831, 0.062, 0.906, 0.076] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7242", "image": {"bytes": "", "path": "./images/b2dd00ff-3f09-46bf-a043-7b996f3815da_5edb6bac-5471-482d-904a-09635fe4ee2d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for holiday campground in Alaska.\nPrevious actions:\n[link] CAMPING AT KOA \uf0d7 -> CLICK\n[link] KOA HOLIDAY CAMPGROUNDS \uf0da -> CLICK\n[button] Alaska -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.298, 0.137, 0.481, 0.145] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7243", "image": {"bytes": "", "path": "./images/6fd2fbb1-9f43-4c46-be42-569a892648e6_5aaf3df7-66f5-437f-8051-2cb596fbdcde.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Add a smartwatch to the cart that is compatible with an iPhone.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.28, 0.009, 0.651, 0.04] in the image\nAnd my action is Action: TYPE\nValue: smartwatch"}]}, {"id": "mind2web_7244", "image": {"bytes": "", "path": "./images/2eaf60d5-d2af-4f50-a3ad-5cc9132a770a_da2be31e-ad27-4939-b2f1-d7ad426c736d.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Buy the cheapest used women's running shoes available in a size 5\nPrevious actions:\n[menuitem] Running Shoes -> CLICK\n[label] 5 (10) -> CLICK\n[combobox] Sort By: -> SELECT: Price: Low - High\n[img] -> CLICK\n[radio] 5 -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.5, 0.574, 0.62, 0.604] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7245", "image": {"bytes": "", "path": "./images/aaade1d2-3a7b-4373-be01-ef98ed86288e_bda0171c-5a26-4bdc-83a3-b7f80d13498e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. 
At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the cheapest new trash can with automated lid that is under $60 and available for Buy It Now.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.214, 0.033, 0.652, 0.058] in the image\nAnd my action is Action: TYPE\nValue: trash can automatic lid"}]}, {"id": "mind2web_7246", "image": {"bytes": "", "path": "./images/27724810-0bc8-446a-a2f4-b53a87e190df_63a067c3-55b3-4dfd-934a-c0bec0d8dccf.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Find the location and operating hours of the nearest CVS pharmacy to zip code 90028\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.887, 0.013, 0.911, 0.028] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7247", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_71b5600a-25ce-4afd-9929-242be8f64d5f.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. 
(For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\nNone"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.188, 0.147, 0.26, 0.155] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7248", "image": {"bytes": "", "path": "./images/a3edc9c7-397d-478b-a0b9-0ffa0897acca_6fc4eaa0-463a-4839-87d9-6df70024abef.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Search for person named John Smith in New York.\nPrevious actions:\n[link] Find People -> CLICK\n[input] -> TYPE: John\n[input] -> TYPE: Smith\n[input] -> TYPE: New York"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.805, 0.282, 0.852, 0.305] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7249", "image": {"bytes": "", "path": "./images/2879afa9-05f9-4d97-bbfe-f95f5d665174_8e5a056f-17cf-401a-8338-09bd7aad3e3e.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Play the Wakanda Forever trailer\nPrevious actions:\n[link] Navigate to on-demand -> HOVER\n[link] select to navigate to Genres -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.031, 0.332, 0.152, 0.453] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7250", "image": {"bytes": "", "path": "./images/7219145b-ada1-480e-a8ea-f4fe164ca84a_6c9158f0-6c3b-4ef9-9e89-c09c74149da8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. 
You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a one way ticket from Faro to Bournemouth leaving on 30th April, with a value fare, for a passenger named Mr. Joe Bloggs. Don't select now and only take 1 small bag onboard.\nPrevious actions:\n[div] -> CLICK\n[button] Search -> CLICK\n[button] Select -> CLICK\n[button] Continue for $ 148 . 16 -> CLICK\n[button] Continue with Value fare -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.123, 0.783, 0.877, 0.839] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7251", "image": {"bytes": "", "path": "./images/5a15ea92-571d-4a45-ad18-5b4eddbc70a6_56dff79d-4441-4435-a096-71da4f343a09.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Get the lowest priced plus size one piece active swimsuit in color black.\nPrevious actions:\n[link] Shop by Category -> CLICK\n[link] Women -> HOVER"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.312, 0.311, 0.36, 0.32] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7252", "image": {"bytes": "", "path": "./images/4d311ce8-c660-43dc-bde9-d06e85f6a12c_2895a313-9711-4a8b-9467-88df99b6dd48.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a room for a business meeting from Apr 30 to May 1 with 7 people within 5 miles of New Delhi. 
The hotel must have a business center and offer high-speed internet, select the nearest hotel.\nPrevious actions:\n[link] Business Meetings \ue922 -> CLICK\n[textbox] Destination -> TYPE: new delhi"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.041, 0.225, 0.335, 0.261] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7253", "image": {"bytes": "", "path": "./images/16e81dcf-9762-4b83-be3e-60a65a15efec_5541dc40-3d3c-4624-8e10-e78d62032873.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Book a first class vacation travel for 2 adults from San Francisco to New york leavind on april 9 and returning on april 19\nPrevious actions:\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 9 -> CLICK\n[button] \udbf8\udc3a View Calendar: To navigate the calendar, use the... -> CLICK\n[link] 19 -> CLICK\n[combobox] Fare preference -> SELECT: First"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.636, 0.322, 0.875, 0.337] in the image\nAnd my action is Action: CLICK"}]}, {"id": "mind2web_7254", "image": {"bytes": "", "path": "./images/58f811fd-0f17-430b-befc-885605e13e41_45d235df-68bb-4808-aab1-97e2fdf6fee8.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: Cancel an order with order number X123456 of Harry Potter Box Set as not available at address.\nPrevious actions:\n[link] Cancel my order -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.235, 0.25, 0.759, 0.278] in the image\nAnd my action is Action: TYPE\nValue: James Smith"}]}, {"id": "mind2web_7255", "image": {"bytes": "", "path": "./images/a531a379-7770-4b72-a8cc-ee19419536cb_b477c115-47bc-4eeb-8d34-b128c643d648.png"}, "conversations": [{"from": "human", "value": "\nImagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. 
You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.\nPlease pinpoint the bounding box coordinates of the element\u2019s region that I should click in order to complete the following instruction: search for art galleries in midtown\nPrevious actions:\n[link] attractions. -> CLICK"}, {"from": "gpt", "value": "For my next step, the target element of my action is bounded by the box [0.145, 0.238, 0.395, 0.261] in the image\nAnd my action is Action: CLICK"}]}] \ No newline at end of file